Merge tag 'nolibc-urgent.2022.10.28a' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 1 Nov 2022 20:15:14 +0000 (13:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 1 Nov 2022 20:15:14 +0000 (13:15 -0700)
Pull nolibc fixes from Paul McKenney:
 "This contains a couple of fixes for string-function bugs"

* tag 'nolibc-urgent.2022.10.28a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  tools/nolibc/string: Fix memcmp() implementation
  tools/nolibc: Fix missing strlen() definition and infinite loop with gcc-12
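
For context on the two fixes above: memcmp() bugs of this kind typically come from comparing bytes as plain (possibly signed) char, which yields the wrong sign whenever a byte is 0x80 or above, and the gcc-12 strlen() problem comes from the compiler recognizing a byte-counting loop and replacing it with a call to strlen() itself, which then never terminates. The sketch below is illustrative only and is not the patch applied by this merge; it assumes a freestanding C build with <stddef.h> providing size_t.

    #include <stddef.h>

    /* Illustrative memcmp(): compare bytes as unsigned char so the sign of
     * the result is correct even for bytes >= 0x80. */
    int memcmp(const void *s1, const void *s2, size_t n)
    {
            const unsigned char *p1 = s1, *p2 = s2;

            while (n--) {
                    if (*p1 != *p2)
                            return *p1 - *p2; /* both promote to int in 0..255 */
                    p1++;
                    p2++;
            }
            return 0;
    }

    /* Illustrative strlen(): the empty asm statement discourages the compiler
     * from pattern-matching the loop back into a strlen() call. */
    size_t strlen(const char *s)
    {
            size_t len;

            for (len = 0; s[len]; len++)
                    __asm__("");
            return len;
    }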

857 files changed:
.mailmap
Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers
Documentation/admin-guide/acpi/index.rst
Documentation/admin-guide/device-mapper/verity.rst
Documentation/admin-guide/media/vivid.rst
Documentation/block/ublk.rst
Documentation/core-api/kernel-api.rst
Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt [deleted file]
Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
Documentation/driver-api/basics.rst
Documentation/driver-api/media/mc-core.rst
Documentation/hwmon/corsair-psu.rst
Documentation/process/maintainer-netdev.rst
Documentation/userspace-api/media/cec.h.rst.exceptions
Documentation/userspace-api/media/v4l/libv4l-introduction.rst
MAINTAINERS
Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/bitops.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/io.h
arch/arc/include/asm/pgtable-levels.h
arch/arc/kernel/smp.c
arch/arc/mm/cache.c
arch/arc/mm/ioremap.c
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/include/asm/stage2_pgtable.h
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kvm/hyp/Makefile
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/mmu.c
arch/arm64/kvm/vgic/vgic-its.c
arch/loongarch/include/asm/processor.h
arch/loongarch/include/asm/ptrace.h
arch/loongarch/kernel/head.S
arch/loongarch/kernel/process.c
arch/loongarch/kernel/switch.S
arch/loongarch/net/bpf_jit.c
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kvm/Kconfig
arch/powerpc/lib/vmx-helper.c
arch/powerpc/mm/book3s64/hash_native.c
arch/powerpc/mm/book3s64/hash_pgtable.c
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/platforms/pseries/lparcfg.c
arch/powerpc/platforms/pseries/vas.c
arch/powerpc/platforms/pseries/vas.h
arch/riscv/Kconfig
arch/riscv/Makefile
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/jump_label.h
arch/riscv/include/asm/kvm_vcpu_timer.h
arch/riscv/include/asm/vdso/processor.h
arch/riscv/kernel/cpu.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_timer.c
arch/riscv/mm/cacheflush.c
arch/riscv/mm/dma-noncoherent.c
arch/riscv/mm/kasan_init.c
arch/s390/boot/vmlinux.lds.S
arch/s390/include/asm/futex.h
arch/s390/kernel/perf_pai_ext.c
arch/s390/lib/uaccess.c
arch/s390/pci/pci_mmio.c
arch/x86/Kconfig
arch/x86/crypto/polyval-clmulni_glue.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/lbr.c
arch/x86/events/rapl.c
arch/x86/include/asm/iommu.h
arch/x86/include/asm/string_64.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/ftrace_64.S
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/debugfs.c
arch/x86/kvm/emulate.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/mm/pat/set_memory.c
arch/x86/net/bpf_jit_comp.c
arch/x86/purgatory/Makefile
block/bfq-iosched.h
block/bio.c
block/blk-mq.c
block/genhd.c
drivers/acpi/acpi_extlog.c
drivers/acpi/acpi_pcc.c
drivers/acpi/apei/ghes.c
drivers/acpi/arm64/iort.c
drivers/acpi/pci_root.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/video_detect.c
drivers/ata/ahci.h
drivers/ata/ahci_brcm.c
drivers/ata/ahci_imx.c
drivers/ata/ahci_qoriq.c
drivers/ata/ahci_st.c
drivers/ata/ahci_xgene.c
drivers/ata/sata_rcar.c
drivers/base/power/domain.c
drivers/base/property.c
drivers/block/drbd/drbd_req.c
drivers/block/rbd.c
drivers/block/ublk_drv.c
drivers/char/hw_random/bcm2835-rng.c
drivers/char/random.c
drivers/counter/104-quad-8.c
drivers/counter/microchip-tcb-capture.c
drivers/counter/ti-ecap-capture.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/qcom-cpufreq-nvmem.c
drivers/cpufreq/sun50i-cpufreq-nvmem.c
drivers/cpufreq/tegra194-cpufreq.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/Makefile.zboot
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/firmware/efi/libstub/zboot.lds
drivers/firmware/efi/riscv-runtime.c
drivers/firmware/efi/vars.c
drivers/gpio/gpio-tegra.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_drm.c
drivers/gpu/drm/msm/dp/dp_parser.c
drivers/gpu/drm/msm/dp/dp_parser.h
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/panfrost/panfrost_dump.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/tests/drm_format_helper_test.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hid/hid-ids.h
drivers/hid/hid-lenovo.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-playstation.c
drivers/hid/hid-quirks.c
drivers/hid/hid-saitek.c
drivers/hwmon/coretemp.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/pwm-fan.c
drivers/hwtracing/coresight/coresight-core.c
drivers/hwtracing/coresight/coresight-cti-core.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-mlxbf.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-qcom-cci.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-xiic.c
drivers/iio/accel/adxl367.c
drivers/iio/accel/adxl372.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/mcp3911.c
drivers/iio/adc/stm32-adc.c
drivers/iio/light/tsl2583.c
drivers/iio/temperature/ltc2983.c
drivers/iommu/amd/iommu.c
drivers/iommu/apple-dart.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/intel/iommu.c
drivers/iommu/iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/virtio-iommu.c
drivers/leds/simple/simatic-ipc-leds-gpio.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-policy.h
drivers/md/dm-clone-target.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid.c
drivers/md/dm-rq.c
drivers/md/dm-stats.c
drivers/md/dm-table.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/media/Kconfig
drivers/media/cec/core/cec-adap.c
drivers/media/cec/platform/cros-ec/cros-ec-cec.c
drivers/media/cec/platform/s5p/s5p_cec.c
drivers/media/dvb-frontends/drxk_hard.c
drivers/media/i2c/ar0521.c
drivers/media/i2c/ir-kbd-i2c.c
drivers/media/i2c/isl7998x.c
drivers/media/i2c/mt9v111.c
drivers/media/i2c/ov5640.c
drivers/media/i2c/ov8865.c
drivers/media/mc/mc-device.c
drivers/media/mc/mc-entity.c
drivers/media/pci/cx18/cx18-av-core.c
drivers/media/pci/cx88/cx88-input.c
drivers/media/pci/cx88/cx88-video.c
drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
drivers/media/platform/amphion/vpu_v4l2.c
drivers/media/platform/chips-media/coda-jpeg.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
drivers/media/platform/nxp/dw100/dw100.c
drivers/media/platform/qcom/camss/camss-video.c
drivers/media/platform/qcom/venus/helpers.c
drivers/media/platform/qcom/venus/hfi.c
drivers/media/platform/qcom/venus/vdec.c
drivers/media/platform/qcom/venus/venc.c
drivers/media/platform/qcom/venus/venc_ctrls.c
drivers/media/platform/renesas/rcar-vin/rcar-core.c
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
drivers/media/platform/renesas/vsp1/vsp1_video.c
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
drivers/media/platform/samsung/s3c-camif/camif-capture.c
drivers/media/platform/st/stm32/stm32-dcmi.c
drivers/media/platform/sunxi/sun4i-csi/Kconfig
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
drivers/media/platform/sunxi/sun6i-csi/Kconfig
drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h
drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig
drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
drivers/media/platform/sunxi/sun8i-di/Kconfig
drivers/media/platform/sunxi/sun8i-rotate/Kconfig
drivers/media/platform/ti/cal/cal-video.c
drivers/media/platform/ti/cal/cal.h
drivers/media/platform/ti/omap3isp/isp.c
drivers/media/platform/ti/omap3isp/ispvideo.c
drivers/media/platform/ti/omap3isp/ispvideo.h
drivers/media/platform/verisilicon/hantro_drv.c
drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
drivers/media/platform/verisilicon/hantro_hevc.c
drivers/media/platform/verisilicon/hantro_postproc.c
drivers/media/platform/verisilicon/imx8m_vpu_hw.c
drivers/media/platform/xilinx/xilinx-dma.c
drivers/media/platform/xilinx/xilinx-dma.h
drivers/media/radio/radio-si476x.c
drivers/media/radio/si4713/si4713.c
drivers/media/rc/imon.c
drivers/media/rc/mceusb.c
drivers/media/test-drivers/vimc/vimc-capture.c
drivers/media/test-drivers/vivid/vivid-core.c
drivers/media/test-drivers/vivid/vivid-core.h
drivers/media/test-drivers/vivid/vivid-osd.c
drivers/media/test-drivers/vivid/vivid-vid-cap.c
drivers/media/tuners/xc4000.c
drivers/media/usb/au0828/au0828-core.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/msi2500/msi2500.c
drivers/media/v4l2-core/v4l2-ctrls-api.c
drivers/media/v4l2-core/v4l2-ctrls-core.c
drivers/media/v4l2-core/v4l2-dev.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/mfd/syscon.c
drivers/misc/sgi-gru/grumain.c
drivers/misc/sgi-gru/grutables.h
drivers/mmc/core/block.c
drivers/mmc/core/queue.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/Kconfig
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/tegra_nand.c
drivers/mtd/parsers/bcm47xxpart.c
drivers/mtd/spi-nor/core.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/dsa/qca/qca8k-8xxx.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/freescale/fman/mac.h
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/i40e/i40e_xsk.h
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_ppe.c
drivers/net/ethernet/mediatek/mtk_wed.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipa/data/ipa_data-v3.5.1.c
drivers/net/ipa/ipa_main.c
drivers/net/ipa/reg/ipa_reg-v3.1.c
drivers/net/macvlan.c
drivers/net/netdevsim/bus.c
drivers/net/netdevsim/dev.c
drivers/net/phy/dp83822.c
drivers/net/phy/dp83867.c
drivers/net/phy/phylink.c
drivers/net/wwan/wwan_hwsim.c
drivers/nfc/virtual_ncidev.c
drivers/nvme/host/apple.c
drivers/nvme/host/core.c
drivers/nvme/host/hwmon.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/pci/controller/pci-tegra.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/pinctrl-zynqmp.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/platform/loongarch/loongson-laptop.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rtc/rtc-cmos.c
drivers/s390/cio/css.c
drivers/s390/crypto/vfio_ap_private.h
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpi3mr/Kconfig
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/scsi_sysfs.c
drivers/spi/spi-aspeed-smc.c
drivers/spi/spi-gxp.c
drivers/spi/spi-intel.c
drivers/spi/spi-mpc52xx.c
drivers/spi/spi-qup.c
drivers/spi/spi-tegra210-quad.c
drivers/staging/media/atomisp/Makefile
drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
drivers/staging/media/atomisp/include/linux/atomisp.h
drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
drivers/staging/media/atomisp/include/linux/atomisp_platform.h
drivers/staging/media/atomisp/notes.txt
drivers/staging/media/atomisp/pci/atomisp_cmd.c
drivers/staging/media/atomisp/pci/atomisp_cmd.h
drivers/staging/media/atomisp/pci/atomisp_compat.h
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
drivers/staging/media/atomisp/pci/atomisp_file.c [deleted file]
drivers/staging/media/atomisp/pci/atomisp_file.h [deleted file]
drivers/staging/media/atomisp/pci/atomisp_fops.c
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
drivers/staging/media/atomisp/pci/atomisp_internal.h
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
drivers/staging/media/atomisp/pci/atomisp_ioctl.h
drivers/staging/media/atomisp/pci/atomisp_subdev.c
drivers/staging/media/atomisp/pci/atomisp_subdev.h
drivers/staging/media/atomisp/pci/atomisp_v4l2.c
drivers/staging/media/atomisp/pci/atomisp_v4l2.h
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
drivers/staging/media/atomisp/pci/sh_css_params.c
drivers/staging/media/imx/imx-media-utils.c
drivers/staging/media/imx/imx7-media-csi.c
drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
drivers/staging/media/ipu3/ipu3-v4l2.c
drivers/staging/media/meson/vdec/vdec.c
drivers/staging/media/omap4iss/iss.c
drivers/staging/media/omap4iss/iss_video.c
drivers/staging/media/omap4iss/iss_video.h
drivers/staging/media/sunxi/cedrus/Kconfig
drivers/staging/media/tegra-video/tegra210.c
drivers/target/target_core_device.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/thermal/intel/intel_powerclamp.c
drivers/ufs/core/ufshcd.c
drivers/ufs/core/ufshpb.c
drivers/ufs/host/ufs-qcom-ice.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/drd.c
drivers/usb/dwc3/dwc3-st.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/uvc_queue.c
drivers/usb/gadget/function/uvc_video.c
drivers/usb/gadget/udc/aspeed-vhub/dev.c
drivers/usb/gadget/udc/bdc/bdc_udc.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/sisusbvga/sisusb_struct.h
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/video/aperture.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/da8xx-fb.c
drivers/video/fbdev/gbefb.c
drivers/video/fbdev/sis/sis_accel.c
drivers/video/fbdev/sis/vstruct.h
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/smscufx.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/xilinxfb.c
drivers/watchdog/exar_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/watchdog_core.c
drivers/watchdog/watchdog_dev.c
drivers/xen/grant-dma-ops.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/block-group.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/export.h
fs/btrfs/extent-io-tree.c
fs/btrfs/extent-tree.c
fs/btrfs/raid56.c
fs/btrfs/send.c
fs/btrfs/send.h
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/cifs/cached_dir.c
fs/cifs/cached_dir.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/crypto/keyring.c
fs/efivarfs/vars.c
fs/erofs/fscache.c
fs/erofs/zdata.c
fs/erofs/zdata.h
fs/erofs/zmap.c
fs/exec.c
fs/ext4/super.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsfh.c
fs/ocfs2/namei.c
fs/proc/task_mmu.c
fs/squashfs/file.c
fs/squashfs/page_actor.c
fs/squashfs/page_actor.h
fs/super.c
include/acpi/ghes.h
include/asm-generic/vmlinux.lds.h
include/drm/gpu_scheduler.h
include/linux/blk-mq.h
include/linux/bpf.h
include/linux/cgroup.h
include/linux/counter.h
include/linux/dsa/tag_qca.h
include/linux/efi.h
include/linux/fb.h
include/linux/fortify-string.h
include/linux/fscrypt.h
include/linux/iommu.h
include/linux/kmsan_string.h [new file with mode: 0644]
include/linux/kvm_host.h
include/linux/mlx5/driver.h
include/linux/net.h
include/linux/netdevice.h
include/linux/overflow.h
include/linux/perf_event.h
include/linux/phylink.h
include/linux/spi/spi-mem.h
include/linux/userfaultfd_k.h
include/linux/utsname.h
include/media/i2c/ir-kbd-i2c.h
include/media/media-device.h
include/media/media-entity.h
include/media/v4l2-common.h
include/media/v4l2-ctrls.h
include/media/v4l2-dev.h
include/media/v4l2-fwnode.h
include/media/v4l2-subdev.h
include/net/genetlink.h
include/net/sock.h
include/net/sock_reuseport.h
include/sound/control.h
include/sound/simple_card_utils.h
include/trace/events/watchdog.h [new file with mode: 0644]
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/panfrost_drm.h
include/uapi/linux/cec-funcs.h
include/uapi/linux/cec.h
include/uapi/linux/perf_event.h
include/uapi/linux/rkisp1-config.h
include/uapi/linux/videodev2.h
init/Kconfig
io_uring/filetable.h
io_uring/io-wq.c
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/msg_ring.c
io_uring/net.c
io_uring/rsrc.c
io_uring/rsrc.h
io_uring/rw.c
ipc/msg.c
kernel/bpf/btf.c
kernel/bpf/cgroup_iter.c
kernel/bpf/dispatcher.c
kernel/bpf/memalloc.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/gcov/gcc_4_7.c
kernel/power/hibernate.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/utsname_sysctl.c
lib/Kconfig.debug
lib/kunit/string-stream.c
lib/kunit/test.c
lib/maple_tree.c
lib/overflow_kunit.c
lib/test_rhashtable.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/kmsan/instrumentation.c
mm/kmsan/shadow.c
mm/madvise.c
mm/memory-tiers.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/page_alloc.c
mm/page_isolation.c
mm/shmem.c
mm/userfaultfd.c
mm/zsmalloc.c
net/atm/mpoa_proc.c
net/can/j1939/transport.c
net/core/dev.c
net/core/net_namespace.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock_reuseport.c
net/dsa/slave.c
net/ethtool/eeprom.c
net/ethtool/pse-pd.c
net/hsr/hsr_forward.c
net/ieee802154/socket.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/nexthop.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/sit.c
net/ipv6/udp.c
net/kcm/kcmsock.c
net/mac802154/rx.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/nf_tables_api.c
net/netlink/genetlink.c
net/openvswitch/datapath.c
net/sched/sch_api.c
net/sched/sch_cake.c
net/sched/sch_fq_codel.c
net/sched/sch_sfb.c
net/smc/smc_core.c
net/tipc/discover.c
net/tipc/topsrv.c
net/tls/tls_strp.c
security/commoncap.c
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
sound/aoa/soundbus/i2sbus/core.c
sound/core/control.c
sound/pci/ac97/ac97_codec.c
sound/pci/au88x0/au88x0.h
sound/pci/au88x0/au88x0_core.c
sound/pci/ca0106/ca0106_mixer.c
sound/pci/emu10k1/emumixer.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/rme9652.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/Kconfig
sound/soc/codecs/cx2072x.h
sound/soc/codecs/jz4725b.c
sound/soc/codecs/mt6660.c
sound/soc/codecs/rt1019.c
sound/soc/codecs/rt1019.h
sound/soc/codecs/rt1308-sdw.c
sound/soc/codecs/rt1308-sdw.h
sound/soc/codecs/rt1308.h
sound/soc/codecs/rt5682s.c
sound/soc/codecs/rt5682s.h
sound/soc/codecs/tlv320adc3xxx.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8997.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card-utils.c
sound/soc/generic/simple-card.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/skylake/skl.c
sound/soc/qcom/Kconfig
sound/soc/qcom/lpass-cpu.c
sound/soc/soc-component.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/pci-mtl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/ipc4-mtrace.c
sound/synth/emux/emux.c
sound/usb/implicit.c
sound/usb/mixer.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/lib/memcpy_64.S
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-libbpf-bpf_program__set_insns.c [new file with mode: 0644]
tools/iio/iio_utils.c
tools/include/uapi/linux/in.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/stat.h
tools/include/uapi/sound/asound.h
tools/perf/Documentation/arm-coresight.txt [moved from tools/perf/Documentation/perf-arm-coresight.txt with 100% similarity]
tools/perf/Makefile.config
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/builtin-record.c
tools/perf/check-headers.sh
tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json [moved from tools/perf/pmu-events/arch/s390/cf_z16/pai.json with 100% similarity]
tools/perf/tests/shell/test_intel_pt.sh
tools/perf/trace/beauty/statx.c
tools/perf/util/auxtrace.c
tools/perf/util/bpf-event.c
tools/perf/util/bpf-loader.c
tools/perf/util/include/linux/linkage.h
tools/power/pm-graph/README
tools/power/pm-graph/sleepgraph.8
tools/power/pm-graph/sleepgraph.py
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/prog_tests/btf.c
tools/testing/selftests/bpf/progs/user_ringbuf_success.c
tools/testing/selftests/drivers/net/bonding/Makefile
tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh [new symlink]
tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
tools/testing/selftests/drivers/net/team/Makefile
tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
tools/testing/selftests/drivers/net/team/lag_lib.sh [new symlink]
tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh [new symlink]
tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc
tools/testing/selftests/futex/functional/Makefile
tools/testing/selftests/intel_pstate/Makefile
tools/testing/selftests/kexec/Makefile
tools/testing/selftests/kvm/aarch64/vgic_init.c
tools/testing/selftests/kvm/memslot_modification_stress_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/openvswitch/Makefile [new file with mode: 0644]
tools/testing/selftests/net/openvswitch/openvswitch.sh [new file with mode: 0755]
tools/testing/selftests/net/openvswitch/ovs-dpctl.py [new file with mode: 0644]
tools/testing/selftests/net/test_ingress_egress_chaining.sh [new file with mode: 0644]
tools/testing/selftests/perf_events/sigtrap_threads.c
tools/verification/dot2/dot2c.py
virt/kvm/kvm_main.c
virt/kvm/pfncache.c

index 380378e..fdd7989 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
 Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
@@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
-Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
 Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
index 45985e4..721a05b 100644 (file)
--- a/Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers
@@ -10,7 +10,7 @@ Description:  A collection of all the memory tiers allocated.
 
 
 What:          /sys/devices/virtual/memory_tiering/memory_tierN/
-               /sys/devices/virtual/memory_tiering/memory_tierN/nodes
+               /sys/devices/virtual/memory_tiering/memory_tierN/nodelist
 Date:          August 2022
 Contact:       Linux memory management mailing list <linux-mm@kvack.org>
 Description:   Directory with details of a specific memory tier
@@ -21,5 +21,5 @@ Description:  Directory with details of a specific memory tier
                A smaller value of N implies a higher (faster) memory tier in the
                hierarchy.
 
-               nodes: NUMA nodes that are part of this memory tier.
+               nodelist: NUMA nodes that are part of this memory tier.
 
index 7127768..b078fdb 100644 (file)
--- a/Documentation/admin-guide/acpi/index.rst
+++ b/Documentation/admin-guide/acpi/index.rst
@@ -9,7 +9,6 @@ the Linux ACPI support.
    :maxdepth: 1
 
    initrd_table_override
-   dsdt-override
    ssdt-overlays
    cppc_sysfs
    fan_performance_states
index 1a6b913..a65c160 100644 (file)
--- a/Documentation/admin-guide/device-mapper/verity.rst
+++ b/Documentation/admin-guide/device-mapper/verity.rst
@@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description>
     also gain new certificates at run time if they are signed by a certificate
     already in the secondary trusted keyring.
 
+try_verify_in_tasklet
+    If verity hashes are in cache, verify data blocks in kernel tasklet instead
+    of workqueue. This option can reduce IO latency.
+
 Theory of operation
 ===================
 
index 4f680dc..abd90ed 100644 (file)
--- a/Documentation/admin-guide/media/vivid.rst
+++ b/Documentation/admin-guide/media/vivid.rst
@@ -1318,7 +1318,7 @@ instance. This setup would require the following commands:
        $ v4l2-ctl -d2 -i2
        $ v4l2-ctl -d2 -c horizontal_movement=4
        $ v4l2-ctl -d1 --overlay=1
-       $ v4l2-ctl -d1 -c loop_video=1
+       $ v4l2-ctl -d0 -c loop_video=1
        $ v4l2-ctl -d2 --stream-mmap --overlay=1
 
 And from another console:
index 2122d1a..ba45c46 100644 (file)
--- a/Documentation/block/ublk.rst
+++ b/Documentation/block/ublk.rst
@@ -144,6 +144,42 @@ managing and controlling ublk devices with help of several control commands:
   For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's
   responsibility to save IO target specific info in userspace.
 
+- ``UBLK_CMD_START_USER_RECOVERY``
+
+  This command is valid if ``UBLK_F_USER_RECOVERY`` feature is enabled. This
+  command is accepted after the old process has exited, ublk device is quiesced
+  and ``/dev/ublkc*`` is released. User should send this command before he starts
+  a new process which re-opens ``/dev/ublkc*``. When this command returns, the
+  ublk device is ready for the new process.
+
+- ``UBLK_CMD_END_USER_RECOVERY``
+
+  This command is valid if ``UBLK_F_USER_RECOVERY`` feature is enabled. This
+  command is accepted after ublk device is quiesced and a new process has
+  opened ``/dev/ublkc*`` and get all ublk queues be ready. When this command
+  returns, ublk device is unquiesced and new I/O requests are passed to the
+  new process.
+
+- user recovery feature description
+
+  Two new features are added for user recovery: ``UBLK_F_USER_RECOVERY`` and
+  ``UBLK_F_USER_RECOVERY_REISSUE``.
+
+  With ``UBLK_F_USER_RECOVERY`` set, after one ubq_daemon(ublk server's io
+  handler) is dying, ublk does not delete ``/dev/ublkb*`` during the whole
+  recovery stage and ublk device ID is kept. It is ublk server's
+  responsibility to recover the device context by its own knowledge.
+  Requests which have not been issued to userspace are requeued. Requests
+  which have been issued to userspace are aborted.
+
+  With ``UBLK_F_USER_RECOVERY_REISSUE`` set, after one ubq_daemon(ublk
+  server's io handler) is dying, contrary to ``UBLK_F_USER_RECOVERY``,
+  requests which have been issued to userspace are requeued and will be
+  re-issued to the new process after handling ``UBLK_CMD_END_USER_RECOVERY``.
+  ``UBLK_F_USER_RECOVERY_REISSUE`` is designed for backends who tolerate
+  double-write since the driver may issue the same I/O request twice. It
+  might be useful to a read-only FS or a VM backend.
+
 Data plane
 ----------
 
index 0793c40..06f4ab1 100644 (file)
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -118,6 +118,12 @@ Text Searching
 CRC and Math Functions in Linux
 ===============================
 
+Arithmetic Overflow Checking
+----------------------------
+
+.. kernel-doc:: include/linux/overflow.h
+   :internal:
+
 CRC Functions
 -------------
 
diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
deleted file mode 100644 (file)
index b88dcdd..0000000
--- a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Dongwoon Anatech DW9714 camera voice coil lens driver
-
-DW9174 is a 10-bit DAC with current sink capability. It is intended
-for driving voice coil lenses in camera modules.
-
-Mandatory properties:
-
-- compatible: "dongwoon,dw9714"
-- reg: I²C slave address
diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
new file mode 100644 (file)
index 0000000..66229a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/dongwoon,dw9714.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dongwoon Anatech DW9714 camera voice coil lens driver
+
+maintainers:
+  - Krzysztof Kozlowski <krzk@kernel.org>
+
+description:
+  DW9174 is a 10-bit DAC with current sink capability. It is intended for
+  driving voice coil lenses in camera modules.
+
+properties:
+  compatible:
+    const: dongwoon,dw9714
+
+  reg:
+    maxItems: 1
+
+  powerdown-gpios:
+    description:
+      XSD pin for shutdown (active low)
+
+  vcc-supply:
+    description: VDD power supply
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        camera-lens@c {
+            compatible = "dongwoon,dw9714";
+            reg = <0x0c>;
+            vcc-supply = <&reg_csi_1v8>;
+        };
+    };
index 64995cb..41c9760 100644 (file)
--- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -8,7 +8,6 @@ title: Samsung S3FWRN5 NCI NFC Controller
 
 maintainers:
   - Krzysztof Kozlowski <krzk@kernel.org>
-  - Krzysztof Opasiak <k.opasiak@samsung.com>
 
 properties:
   compatible:
index 1e2b9b6..2722dc7 100644 (file)
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
@@ -274,10 +274,6 @@ patternProperties:
           slew-rate:
             enum: [0, 1]
 
-          output-enable:
-            description:
-              This will internally disable the tri-state for MIO pins.
-
           drive-strength:
             description:
               Selects the drive strength for MIO pins, in mA.
index 3e2dae9..4b4d8e2 100644 (file)
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -107,9 +107,6 @@ Kernel utility functions
 .. kernel-doc:: kernel/panic.c
    :export:
 
-.. kernel-doc:: include/linux/overflow.h
-   :internal:
-
 Device Resource Management
 --------------------------
 
index 84aa7cd..400b8ca 100644 (file)
--- a/Documentation/driver-api/media/mc-core.rst
+++ b/Documentation/driver-api/media/mc-core.rst
@@ -214,18 +214,29 @@ Link properties can be modified at runtime by calling
 Pipelines and media streams
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+A media stream is a stream of pixels or metadata originating from one or more
+source devices (such as a sensors) and flowing through media entity pads
+towards the final sinks. The stream can be modified on the route by the
+devices (e.g. scaling or pixel format conversions), or it can be split into
+multiple branches, or multiple branches can be merged.
+
+A media pipeline is a set of media streams which are interdependent. This
+interdependency can be caused by the hardware (e.g. configuration of a second
+stream cannot be changed if the first stream has been enabled) or by the driver
+due to the software design. Most commonly a media pipeline consists of a single
+stream which does not branch.
+
 When starting streaming, drivers must notify all entities in the pipeline to
 prevent link states from being modified during streaming by calling
 :c:func:`media_pipeline_start()`.
 
-The function will mark all entities connected to the given entity through
-enabled links, either directly or indirectly, as streaming.
+The function will mark all the pads which are part of the pipeline as streaming.
 
 The struct media_pipeline instance pointed to by
-the pipe argument will be stored in every entity in the pipeline.
+the pipe argument will be stored in every pad in the pipeline.
 Drivers should embed the struct media_pipeline
 in higher-level pipeline structures and can then access the
-pipeline through the struct media_entity
+pipeline through the struct media_pad
 pipe field.
 
 Calls to :c:func:`media_pipeline_start()` can be nested.
index 3c1b164..6a03edb 100644 (file)
--- a/Documentation/hwmon/corsair-psu.rst
+++ b/Documentation/hwmon/corsair-psu.rst
@@ -19,6 +19,8 @@ Supported devices:
 
   Corsair HX1200i
 
+  Corsair HX1500i
+
   Corsair RM550i
 
   Corsair RM650i
index d140070..1fa5ab8 100644 (file)
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -319,3 +319,13 @@ unpatched tree to confirm infrastructure didn't mangle it.
 Finally, go back and read
 :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
 to be sure you are not repeating some common mistake documented there.
+
+My company uses peer feedback in employee performance reviews. Can I ask netdev maintainers for feedback?
+---------------------------------------------------------------------------------------------------------
+
+Yes, especially if you spend significant amount of time reviewing code
+and go out of your way to improve shared infrastructure.
+
+The feedback must be requested by you, the contributor, and will always
+be shared with you (even if you request for it to be submitted to your
+manager).
index 13de01d..15fa175 100644 (file)
--- a/Documentation/userspace-api/media/cec.h.rst.exceptions
+++ b/Documentation/userspace-api/media/cec.h.rst.exceptions
@@ -239,6 +239,7 @@ ignore define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL
 ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE
 ignore define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX
 ignore define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX
+ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL
 
 ignore define CEC_MSG_GIVE_FEATURES
 
@@ -487,6 +488,7 @@ ignore define CEC_OP_SYS_AUD_STATUS_ON
 
 ignore define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST
 ignore define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS
+ignore define CEC_MSG_SET_AUDIO_VOLUME_LEVEL
 
 ignore define CEC_OP_AUD_FMT_ID_CEA861
 ignore define CEC_OP_AUD_FMT_ID_CEA861_CXT
index 9021531..7c8bf16 100644 (file)
--- a/Documentation/userspace-api/media/v4l/libv4l-introduction.rst
+++ b/Documentation/userspace-api/media/v4l/libv4l-introduction.rst
@@ -136,9 +136,9 @@ V4L2 functions
 
    operates like the :c:func:`read()` function.
 
-.. c:function:: void v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+.. c:function:: void *v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
 
-   operates like the :c:func:`munmap()` function.
+   operates like the :c:func:`mmap()` function.
 
 .. c:function:: int v4l2_munmap(void *_start, size_t length);
 
index cf0f185..6bb0fea 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4101,6 +4101,7 @@ N:        bcm7038
 N:     bcm7120
 
 BROADCOM BDC DRIVER
+M:     Justin Chen <justinpopo6@gmail.com>
 M:     Al Cooper <alcooperx@gmail.com>
 L:     linux-usb@vger.kernel.org
 R:     Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
@@ -4207,6 +4208,7 @@ F:        Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 F:     drivers/tty/serial/8250/8250_bcm7271.c
 
 BROADCOM BRCMSTB USB EHCI DRIVER
+M:     Justin Chen <justinpopo6@gmail.com>
 M:     Al Cooper <alcooperx@gmail.com>
 R:     Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
@@ -4223,6 +4225,7 @@ F:        Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
 F:     drivers/usb/misc/brcmstb-usb-pinmap.c
 
 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
+M:     Justin Chen <justinpopo6@gmail.com>
 M:     Al Cooper <alcooperx@gmail.com>
 R:     Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-kernel@vger.kernel.org
@@ -4459,13 +4462,15 @@ M:      Josef Bacik <josef@toxicpanda.com>
 M:     David Sterba <dsterba@suse.com>
 L:     linux-btrfs@vger.kernel.org
 S:     Maintained
-W:     http://btrfs.wiki.kernel.org/
-Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
+W:     https://btrfs.readthedocs.io
+W:     https://btrfs.wiki.kernel.org/
+Q:     https://patchwork.kernel.org/project/linux-btrfs/list/
 C:     irc://irc.libera.chat/btrfs
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
 F:     include/linux/btrfs*
+F:     include/trace/events/btrfs.h
 F:     include/uapi/linux/btrfs*
 
 BTTV VIDEO4LINUX DRIVER
@@ -5266,6 +5271,7 @@ F:        tools/testing/selftests/cgroup/
 
 CONTROL GROUP - BLOCK IO CONTROLLER (BLKIO)
 M:     Tejun Heo <tj@kernel.org>
+M:     Josef Bacik <josef@toxicpanda.com>
 M:     Jens Axboe <axboe@kernel.dk>
 L:     cgroups@vger.kernel.org
 L:     linux-block@vger.kernel.org
@@ -5273,6 +5279,7 @@ T:        git git://git.kernel.dk/linux-block
 F:     Documentation/admin-guide/cgroup-v1/blkio-controller.rst
 F:     block/bfq-cgroup.c
 F:     block/blk-cgroup.c
+F:     block/blk-iocost.c
 F:     block/blk-iolatency.c
 F:     block/blk-throttle.c
 F:     include/linux/blk-cgroup.h
@@ -6280,7 +6287,7 @@ M:        Sakari Ailus <sakari.ailus@linux.intel.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
-F:     Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
+F:     Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
 F:     drivers/media/i2c/dw9714.c
 
 DONGWOON DW9768 LENS VOICE COIL DRIVER
@@ -11241,7 +11248,7 @@ L:      kvm@vger.kernel.org
 L:     kvm-riscv@lists.infradead.org
 L:     linux-riscv@lists.infradead.org
 S:     Maintained
-T:     git git://github.com/kvm-riscv/linux.git
+T:     git https://github.com/kvm-riscv/linux.git
 F:     arch/riscv/include/asm/kvm*
 F:     arch/riscv/include/uapi/asm/kvm*
 F:     arch/riscv/kvm/
@@ -11254,7 +11261,6 @@ M:      Claudio Imbrenda <imbrenda@linux.ibm.com>
 R:     David Hildenbrand <david@redhat.com>
 L:     kvm@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 F:     Documentation/virt/kvm/s390*
 F:     arch/s390/include/asm/gmap.h
@@ -14520,7 +14526,7 @@ L:      linux-nilfs@vger.kernel.org
 S:     Supported
 W:     https://nilfs.sourceforge.io/
 W:     https://nilfs.osdn.jp/
-T:     git git://github.com/konis/nilfs2.git
+T:     git https://github.com/konis/nilfs2.git
 F:     Documentation/filesystems/nilfs2.rst
 F:     fs/nilfs2/
 F:     include/trace/events/nilfs2.h
@@ -14709,6 +14715,12 @@ F:     drivers/nvme/target/auth.c
 F:     drivers/nvme/target/fabrics-cmd-auth.c
 F:     include/linux/nvme-auth.h
 
+NVM EXPRESS HARDWARE MONITORING SUPPORT
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     linux-nvme@lists.infradead.org
+S:     Supported
+F:     drivers/nvme/host/hwmon.c
+
 NVM EXPRESS FC TRANSPORT DRIVERS
 M:     James Smart <james.smart@broadcom.com>
 L:     linux-nvme@lists.infradead.org
@@ -15426,6 +15438,7 @@ S:      Maintained
 W:     http://openvswitch.org
 F:     include/uapi/linux/openvswitch.h
 F:     net/openvswitch/
+F:     tools/testing/selftests/net/openvswitch/
 
 OPERATING PERFORMANCE POINTS (OPP)
 M:     Viresh Kumar <vireshk@kernel.org>
@@ -15839,7 +15852,7 @@ F:      Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
 F:     drivers/pci/controller/dwc/*designware*
 
 PCI DRIVER FOR TI DRA7XX/J721E
-M:     Kishon Vijay Abraham I <kishon@ti.com>
+M:     Vignesh Raghavendra <vigneshr@ti.com>
 L:     linux-omap@vger.kernel.org
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -15856,10 +15869,10 @@ F:    Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
 F:     drivers/pci/controller/pci-v3-semi.c
 
 PCI ENDPOINT SUBSYSTEM
-M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 R:     Krzysztof Wilczyński <kw@linux.com>
 R:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R:     Kishon Vijay Abraham I <kishon@kernel.org>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-pci/list/
@@ -16665,6 +16678,7 @@ F:      Documentation/driver-api/ptp.rst
 F:     drivers/net/phy/dp83640*
 F:     drivers/ptp/*
 F:     include/linux/ptp_cl*
+K:     (?:\b|_)ptp(?:\b|_)
 
 PTP VIRTUAL CLOCK SUPPORT
 M:     Yangbo Lu <yangbo.lu@nxp.com>
@@ -17801,7 +17815,7 @@ S:      Odd Fixes
 F:     drivers/tty/serial/rp2.*
 
 ROHM BD99954 CHARGER IC
-R:     Matti Vaittinen <mazziesaccount@gmail.com>
+M:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/power/supply/bd99954-charger.c
 F:     drivers/power/supply/bd99954-charger.h
@@ -17824,7 +17838,7 @@ F:      drivers/regulator/bd9571mwv-regulator.c
 F:     include/linux/mfd/bd9571mwv.h
 
 ROHM POWER MANAGEMENT IC DEVICE DRIVERS
-R:     Matti Vaittinen <mazziesaccount@gmail.com>
+M:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/clk/clk-bd718x7.c
 F:     drivers/gpio/gpio-bd71815.c
@@ -17986,7 +18000,6 @@ R:      Christian Borntraeger <borntraeger@linux.ibm.com>
 R:     Sven Schnelle <svens@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
 F:     Documentation/driver-api/s390-drivers.rst
 F:     Documentation/s390/
@@ -17998,7 +18011,6 @@ M:      Vineeth Vijayan <vneethv@linux.ibm.com>
 M:     Peter Oberparleiter <oberpar@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/cio/
 
 S390 DASD DRIVER
@@ -18006,7 +18018,6 @@ M:      Stefan Haberland <sth@linux.ibm.com>
 M:     Jan Hoeppner <hoeppner@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     block/partitions/ibm.c
 F:     drivers/s390/block/dasd*
 F:     include/linux/dasd_mod.h
@@ -18016,7 +18027,6 @@ M:      Matthew Rosato <mjrosato@linux.ibm.com>
 M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
@@ -18025,7 +18035,6 @@ M:      Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/net/*iucv*
 F:     include/net/iucv/
 F:     net/iucv/
@@ -18036,7 +18045,6 @@ M:      Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/net/
 
 S390 PCI SUBSYSTEM
@@ -18044,7 +18052,6 @@ M:      Niklas Schnelle <schnelle@linux.ibm.com>
 M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     arch/s390/pci/
 F:     drivers/pci/hotplug/s390_pci_hpc.c
 F:     Documentation/s390/pci.rst
@@ -18055,7 +18062,6 @@ M:      Halil Pasic <pasic@linux.ibm.com>
 M:     Jason Herne <jjherne@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     Documentation/s390/vfio-ap*
 F:     drivers/s390/crypto/vfio_ap*
 
@@ -18084,7 +18090,6 @@ S390 ZCRYPT DRIVER
 M:     Harald Freudenberger <freude@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/crypto/
 
 S390 ZFCP DRIVER
@@ -18092,7 +18097,6 @@ M:      Steffen Maier <maier@linux.ibm.com>
 M:     Benjamin Block <bblock@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
@@ -18131,7 +18135,6 @@ L:      linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/staging/media/deprecated/saa7146/
-F:     include/media/drv-intf/saa7146*
 
 SAFESETID SECURITY MODULE
 M:     Micah Morton <mortonm@chromium.org>
@@ -18211,7 +18214,6 @@ F:      include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-M:     Krzysztof Opasiak <k.opasiak@samsung.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -18666,7 +18668,6 @@ M:      Wenjia Zhang <wenjia@linux.ibm.com>
 M:     Jan Karcher <jaka@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
-W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     net/smc/
 
 SHARP GP2AP002A00F/GP2AP002S00F SENSOR DRIVER
@@ -18777,7 +18778,7 @@ M:      Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
-T:     git git://github.com/sifive/riscv-linux.git
+T:     git https://github.com/sifive/riscv-linux.git
 N:     sifive
 K:     [^@]sifive
 
@@ -21181,15 +21182,6 @@ S:     Maintained
 F:     Documentation/usb/ehci.rst
 F:     drivers/usb/host/ehci*
 
-USB GADGET/PERIPHERAL SUBSYSTEM
-M:     Felipe Balbi <balbi@kernel.org>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-W:     http://www.linux-usb.org/gadget
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-F:     drivers/usb/gadget/
-F:     include/linux/usb/gadget*
-
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M:     Jiri Kosina <jikos@kernel.org>
 M:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
@@ -21293,16 +21285,9 @@ L:     linux-usb@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Maintained
 W:     https://github.com/petkan/pegasus
-T:     git git://github.com/petkan/pegasus.git
+T:     git https://github.com/petkan/pegasus.git
 F:     drivers/net/usb/pegasus.*
 
-USB PHY LAYER
-M:     Felipe Balbi <balbi@kernel.org>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-F:     drivers/usb/phy/
-
 USB PRINTER DRIVER (usblp)
 M:     Pete Zaitcev <zaitcev@redhat.com>
 L:     linux-usb@vger.kernel.org
@@ -21330,7 +21315,7 @@ L:      linux-usb@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Maintained
 W:     https://github.com/petkan/rtl8150
-T:     git git://github.com/petkan/rtl8150.git
+T:     git https://github.com/petkan/rtl8150.git
 F:     drivers/net/usb/rtl8150.c
 
 USB SERIAL SUBSYSTEM
@@ -22121,6 +22106,7 @@ F:      Documentation/watchdog/
 F:     drivers/watchdog/
 F:     include/linux/watchdog.h
 F:     include/uapi/linux/watchdog.h
+F:     include/trace/events/watchdog.h
 
 WHISKEYCOVE PMIC GPIO DRIVER
 M:     Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
@@ -22761,7 +22747,7 @@ S:      Maintained
 W:     http://mjpeg.sourceforge.net/driver-zoran/
 Q:     https://patchwork.linuxtv.org/project/linux-media/list/
 F:     Documentation/driver-api/media/drivers/zoran.rst
-F:     drivers/staging/media/zoran/
+F:     drivers/media/pci/zoran/
 
 ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
 M:     Minchan Kim <minchan@kernel.org>
index f41ec8c..28026d1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index cd1edcf..3434c81 100644 (file)
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
                        dma-coherent;
                };
 
-               ehci@40000 {
+               usb@40000 {
                        dma-coherent;
                };
 
-               ohci@60000 {
+               usb@60000 {
                        dma-coherent;
                };
 
index 7077938..67556f4 100644 (file)
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
                        dma-coherent;
                };
 
-               ehci@40000 {
+               usb@40000 {
                        dma-coherent;
                };
 
-               ohci@60000 {
+               usb@60000 {
                        dma-coherent;
                };
 
index 99d3e71..b644353 100644 (file)
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
                        mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
-               ehci@40000 {
+               usb@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
                };
 
-               ohci@60000 {
+               usb@60000 {
                        compatible = "generic-ohci";
                        reg = < 0x60000 0x100 >;
                        interrupts = < 8 >;
index f48ba03..6691f42 100644 (file)
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
                        };
                };
 
-               ohci@60000 {
+               usb@60000 {
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
                        dma-coherent;
                };
 
-               ehci@40000 {
+               usb@40000 {
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
index cbb1797..90a4120 100644 (file)
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -46,7 +46,7 @@
                        clock-names = "stmmaceth";
                };
 
-               ehci@40000 {
+               usb@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
index e31a8eb..8176416 100644 (file)
@@ -35,9 +35,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
@@ -99,7 +96,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
index e0e8567..d518127 100644 (file)
@@ -34,9 +34,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
@@ -97,7 +94,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
index fcbc952..2f336d9 100644 (file)
@@ -35,9 +35,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
@@ -100,7 +97,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
index d87ad7e..899b2fd 100644 (file)
@@ -59,6 +59,5 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_MEMORY_INIT=y
 # CONFIG_DEBUG_PREEMPT is not set
index 8d82cdb..0d32aac 100644 (file)
@@ -59,6 +59,5 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DEBUG_PREEMPT is not set
index f856b03..d18378d 100644 (file)
@@ -85,7 +85,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
index a1ce12b..3e98297 100644 (file)
@@ -56,5 +56,4 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 # CONFIG_DEBUG_PREEMPT is not set
index ca10f4a..502c87f 100644 (file)
@@ -65,4 +65,3 @@ CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
-# CONFIG_ENABLE_MUST_CHECK is not set
index 31b6ec3..f721cc3 100644 (file)
@@ -63,4 +63,3 @@ CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
-# CONFIG_ENABLE_MUST_CHECK is not set
index 41a0037..1419fc9 100644 (file)
@@ -26,9 +26,6 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
@@ -37,7 +34,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_BLK_DEV is not set
 CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -74,5 +70,5 @@ CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FTRACE=y
+# CONFIG_NET_VENDOR_CADENCE is not set
index 4a94d16..6f0d2be 100644 (file)
@@ -35,15 +35,11 @@ CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_NETDEVICES=y
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -94,12 +90,11 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_FS=y
 CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
 # CONFIG_CRYPTO_HW is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
index 0c3b214..d3ef189 100644 (file)
@@ -58,8 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -87,7 +85,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
index f9ad9d3..944b347 100644 (file)
@@ -91,7 +91,6 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
index bdb7e19..f5a9364 100644 (file)
@@ -82,7 +82,7 @@ static inline __attribute__ ((const)) int fls(unsigned int x)
 /*
  * __fls: Similar to fls, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __fls(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
 {
        if (!x)
                return 0;
@@ -131,7 +131,7 @@ static inline __attribute__ ((const)) int fls(unsigned int x)
 /*
  * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
  */
-static inline __attribute__ ((const)) int __fls(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
 {
        /* FLS insn has exactly same semantics as the API */
        return  __builtin_arc_fls(x);
index 5aab4f9..67ff06e 100644 (file)
@@ -21,7 +21,7 @@
  *      r25 contains the kernel current task ptr
  *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
  *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
- *      address Write back load ld.ab instead of seperate ld/add instn
+ *      address Write back load ld.ab instead of separate ld/add instn
  *
  * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
  */
index 8f777d6..8034738 100644 (file)
@@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr)
 {
 }
 
-extern void iounmap(const void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
 
 /*
  * io{read,write}{16,32}be() macros
index 64ca25d..ef68758 100644 (file)
 #define pmd_pfn(pmd)           ((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
 #define pmd_page(pmd)          virt_to_page(pmd_page_vaddr(pmd))
 #define set_pmd(pmdp, pmd)     (*(pmdp) = pmd)
-#define pmd_pgtable(pmd)       ((pgtable_t) pmd_page_vaddr(pmd))
+#define pmd_pgtable(pmd)       ((pgtable_t) pmd_page(pmd))
 
 /*
  * 4th level paging: pte
index ab9e75e..ad93fe6 100644 (file)
@@ -385,7 +385,7 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
  *
  * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
- * function needs to call call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
  * request_percpu_irq() below will fail
  */
 static DEFINE_PER_CPU(int, ipi_dev);
index 5446967..55c6de1 100644 (file)
@@ -750,7 +750,7 @@ static inline void arc_slc_enable(void)
  *  -In SMP, if hardware caches are coherent
  *
  * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to to K-mapping, former needs flushing.
+ * If the U-mapping is not congruent to K-mapping, former needs flushing.
  */
 void flush_dcache_page(struct page *page)
 {
@@ -910,7 +910,7 @@ EXPORT_SYMBOL(flush_icache_range);
  * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
  *    However in one instance, when called by kprobe (for a breakpt in
  *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
- *    use a paddr to index the cache (despite VIPT). This is fine since since a
+ *    use a paddr to index the cache (despite VIPT). This is fine since a
  *    builtin kernel page will not have any virtual mappings.
  *    kprobe on loadable module will be kernel vaddr.
  */
index 0ee75ac..712c231 100644 (file)
@@ -94,7 +94,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 EXPORT_SYMBOL(ioremap_prot);
 
 
-void iounmap(const void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
        /* weird double cast to handle phys_addr_t > 32 bits */
        if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
index 1b098bd..3252eb5 100644 (file)
 
 #define KVM_PGTABLE_MAX_LEVELS         4U
 
+/*
+ * The largest supported block sizes for KVM (no 52-bit PA support):
+ *  - 4K (level 1):    1GB
+ *  - 16K (level 2):   32MB
+ *  - 64K (level 2):   512MB
+ */
+#ifdef CONFIG_ARM64_4K_PAGES
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL    1U
+#else
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL    2U
+#endif
+
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
        u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
@@ -58,11 +70,7 @@ static inline u64 kvm_granule_size(u32 level)
 
 static inline bool kvm_level_supports_block_mapping(u32 level)
 {
-       /*
-        * Reject invalid block mappings and don't bother with 4TB mappings for
-        * 52-bit PAs.
-        */
-       return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+       return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
 }
 
 /**
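
For illustration only (not part of the patch): the new comment above quotes the largest block size per page size, and those numbers can be reproduced with the arm64 level-shift formula, assumed here to be (PAGE_SHIFT - 3) * (4 - level) + 3 as in ARM64_HW_PGTABLE_LEVEL_SHIFT().

/* Standalone sketch checking the sizes quoted in the KVM_PGTABLE_MIN_BLOCK_LEVEL comment. */
#include <stdio.h>

static unsigned long long granule_size(unsigned int page_shift, unsigned int level)
{
	unsigned int shift = (page_shift - 3) * (4 - level) + 3;

	return 1ULL << shift;
}

int main(void)
{
	printf("4K pages,  level 1: %4llu MiB\n", granule_size(12, 1) >> 20); /* 1024 MiB = 1GB */
	printf("16K pages, level 2: %4llu MiB\n", granule_size(14, 2) >> 20); /*   32 MiB */
	printf("64K pages, level 2: %4llu MiB\n", granule_size(16, 2) >> 20); /*  512 MiB */
	return 0;
}
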
index fe341a6..c8dca8a 100644 (file)
 #include <linux/pgtable.h>
 
 /*
- * PGDIR_SHIFT determines the size a top-level page table entry can map
- * and depends on the number of levels in the page table. Compute the
- * PGDIR_SHIFT for a given number of levels.
- */
-#define pt_levels_pgdir_shift(lvls)    ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
-
-/*
  * The hardware supports concatenation of up to 16 tables at stage2 entry
  * level and we use the feature whenever possible, which means we resolve 4
  * additional bits of address at the entry level.
 #define stage2_pgtable_levels(ipa)     ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
 #define kvm_stage2_levels(kvm)         VTCR_EL2_LVLS(kvm->arch.vtcr)
 
-/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
-#define stage2_pgdir_shift(kvm)                pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
-#define stage2_pgdir_size(kvm)         (1ULL << stage2_pgdir_shift(kvm))
-#define stage2_pgdir_mask(kvm)         ~(stage2_pgdir_size(kvm) - 1)
-
 /*
  * kvm_mmu_cache_min_pages() is the number of pages required to install
  * a stage-2 translation. We pre-allocate the entry level page table at
  */
 #define kvm_mmu_cache_min_pages(kvm)   (kvm_stage2_levels(kvm) - 1)
 
-static inline phys_addr_t
-stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
-
-       return (boundary - 1 < end - 1) ? boundary : end;
-}
-
 #endif /* __ARM64_S2_PGTABLE_H_ */
index bd5df50..795344a 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
-SYM_FUNC_START(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
        ret
 SYM_FUNC_END(ftrace_stub)
 
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+       ret
+SYM_FUNC_END(ftrace_stub_graph)
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * void return_to_handler(void)
index 687598e..a38dea6 100644 (file)
@@ -5,9 +5,6 @@
 
 incdir := $(srctree)/$(src)/include
 subdir-asflags-y := -I$(incdir)
-subdir-ccflags-y := -I$(incdir)                                \
-                   -fno-stack-protector                \
-                   -DDISABLE_BRANCH_PROFILING          \
-                   $(DISABLE_STACKLEAK_PLUGIN)
+subdir-ccflags-y := -I$(incdir)
 
 obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
index b5c5119..be0a2bc 100644 (file)
@@ -10,6 +10,9 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
 # will explode instantly (Words of Marc Zyngier). So introduce a generic flag
 # __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
+ccflags-y += -fno-stack-protector      \
+            -DDISABLE_BRANCH_PROFILING \
+            $(DISABLE_STACKLEAK_PLUGIN)
 
 hostprogs := gen-hyprel
 HOST_EXTRACFLAGS += -I$(objtree)/include
@@ -89,6 +92,10 @@ quiet_cmd_hypcopy = HYPCOPY $@
 # Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
 # This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
+# when profile optimization is applied. gen-hyprel does not support SHT_REL and
+# causes a build failure. Remove profile optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
 
 # KVM nVHE code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may
index 34c5fee..60ee3d9 100644 (file)
@@ -31,6 +31,13 @@ static phys_addr_t hyp_idmap_vector;
 
 static unsigned long io_map_base;
 
+static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+       phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
+
+       return (boundary - 1 < end - 1) ? boundary : end;
+}
 
 /*
  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
@@ -52,7 +59,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
                if (!pgt)
                        return -EINVAL;
 
-               next = stage2_pgd_addr_end(kvm, addr, end);
+               next = stage2_range_addr_end(addr, end);
                ret = fn(pgt, addr, next - addr);
                if (ret)
                        break;
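
An aside on the helper above (illustration, not part of the patch): stage2_range_addr_end() caps each step of the walk at the next boundary of the largest supported block so the loop can drop kvm_mmu_lock periodically on large regions, per the comment above it. The stepping looks like this with a 1GiB block (4K pages, level 1) and a simplified ALIGN_DOWN():

#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))	/* power-of-two 'a' only */

/* Mirrors stage2_range_addr_end() with a fixed 1GiB block size. */
static uint64_t range_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t size = 1ULL << 30;
	uint64_t boundary = ALIGN_DOWN(addr + size, size);

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0x40200000ULL;		/* deliberately not 1GiB aligned */
	uint64_t end  = 0x140000000ULL;		/* 5 GiB */

	while (addr < end) {
		uint64_t next = range_addr_end(addr, end);

		/* First chunk ends at the 2GiB boundary, then full 1GiB steps follow. */
		printf("chunk: %#llx - %#llx\n",
		       (unsigned long long)addr, (unsigned long long)next);
		addr = next;			/* the real loop may reschedule here */
	}
	return 0;
}
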
index 24d7778..733b530 100644 (file)
@@ -2149,7 +2149,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
 
        memset(entry, 0, esz);
 
-       while (len > 0) {
+       while (true) {
                int next_offset;
                size_t byte_offset;
 
@@ -2162,6 +2162,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
                        return next_offset;
 
                byte_offset = next_offset * esz;
+               if (byte_offset >= len)
+                       break;
+
                id += next_offset;
                gpa += byte_offset;
                len -= byte_offset;
index 6954dc5..7184f1d 100644 (file)
@@ -191,7 +191,7 @@ static inline void flush_thread(void)
 unsigned long __get_wchan(struct task_struct *p);
 
 #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
-                        THREAD_SIZE - 32 - sizeof(struct pt_regs))
+                        THREAD_SIZE - sizeof(struct pt_regs))
 #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
index 17838c6..59c4608 100644 (file)
@@ -29,7 +29,7 @@ struct pt_regs {
        unsigned long csr_euen;
        unsigned long csr_ecfg;
        unsigned long csr_estat;
-       unsigned long __last[0];
+       unsigned long __last[];
 } __aligned(8);
 
 static inline int regs_irqs_disabled(struct pt_regs *regs)
@@ -133,7 +133,7 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
 #define current_pt_regs()                                              \
 ({                                                                     \
        unsigned long sp = (unsigned long)__builtin_frame_address(0);   \
-       (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;      \
+       (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1) - 1;           \
 })
 
 /* Helpers for working with the user stack pointer */
index 9742577..84970e2 100644 (file)
@@ -84,10 +84,9 @@ SYM_CODE_START(kernel_entry)                 # kernel entry point
 
        la.pcrel        tp, init_thread_union
        /* Set the SP after an empty pt_regs.  */
-       PTR_LI          sp, (_THREAD_SIZE - 32 - PT_SIZE)
+       PTR_LI          sp, (_THREAD_SIZE - PT_SIZE)
        PTR_ADD         sp, sp, tp
        set_saved_sp    sp, t0, t1
-       PTR_ADDI        sp, sp, -4 * SZREG      # init stack pointer
 
        bl              start_kernel
        ASM_BUG()
index 1256e35..2526b68 100644 (file)
@@ -129,7 +129,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        unsigned long clone_flags = args->flags;
        struct pt_regs *childregs, *regs = current_pt_regs();
 
-       childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+       childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
@@ -236,7 +236,7 @@ bool in_task_stack(unsigned long stack, struct task_struct *task,
                        struct stack_info *info)
 {
        unsigned long begin = (unsigned long)task_stack_page(task);
-       unsigned long end = begin + THREAD_SIZE - 32;
+       unsigned long end = begin + THREAD_SIZE;
 
        if (stack < begin || stack >= end)
                return false;
index 43ebbc3..202a163 100644 (file)
@@ -26,7 +26,7 @@ SYM_FUNC_START(__switch_to)
        move    tp, a2
        cpu_restore_nonscratch a1
 
-       li.w            t0, _THREAD_SIZE - 32
+       li.w            t0, _THREAD_SIZE
        PTR_ADD         t0, t0, tp
        set_saved_sp    t0, t1, t2
 
index 43f0a98..bdcd0c7 100644 (file)
@@ -279,6 +279,7 @@ static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
        const u8 t1 = LOONGARCH_GPR_T1;
        const u8 t2 = LOONGARCH_GPR_T2;
        const u8 t3 = LOONGARCH_GPR_T3;
+       const u8 r0 = regmap[BPF_REG_0];
        const u8 src = regmap[insn->src_reg];
        const u8 dst = regmap[insn->dst_reg];
        const s16 off = insn->off;
@@ -359,8 +360,6 @@ static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
                break;
        /* r0 = atomic_cmpxchg(dst + off, r0, src); */
        case BPF_CMPXCHG:
-               u8 r0 = regmap[BPF_REG_0];
-
                move_reg(ctx, t2, r0);
                if (isdw) {
                        emit_insn(ctx, lld, r0, t1, 0);
@@ -390,8 +389,11 @@ static bool is_signed_bpf_cond(u8 cond)
 
 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
 {
-       const bool is32 = BPF_CLASS(insn->code) == BPF_ALU ||
-                         BPF_CLASS(insn->code) == BPF_JMP32;
+       u8 tm = -1;
+       u64 func_addr;
+       bool func_addr_fixed;
+       int i = insn - ctx->prog->insnsi;
+       int ret, jmp_offset;
        const u8 code = insn->code;
        const u8 cond = BPF_OP(code);
        const u8 t1 = LOONGARCH_GPR_T1;
@@ -400,8 +402,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
        const u8 dst = regmap[insn->dst_reg];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
-       int jmp_offset;
-       int i = insn - ctx->prog->insnsi;
+       const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+       const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
 
        switch (code) {
        /* dst = src */
@@ -724,24 +726,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
-               u8 t7 = -1;
                jmp_offset = bpf2la_offset(i, off, ctx);
                if (imm) {
                        move_imm(ctx, t1, imm, false);
-                       t7 = t1;
+                       tm = t1;
                } else {
                        /* If imm is 0, simply use zero register. */
-                       t7 = LOONGARCH_GPR_ZERO;
+                       tm = LOONGARCH_GPR_ZERO;
                }
                move_reg(ctx, t2, dst);
                if (is_signed_bpf_cond(BPF_OP(code))) {
-                       emit_sext_32(ctx, t7, is32);
+                       emit_sext_32(ctx, tm, is32);
                        emit_sext_32(ctx, t2, is32);
                } else {
-                       emit_zext_32(ctx, t7, is32);
+                       emit_zext_32(ctx, tm, is32);
                        emit_zext_32(ctx, t2, is32);
                }
-               if (emit_cond_jmp(ctx, cond, t2, t7, jmp_offset) < 0)
+               if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
                        goto toofar;
                break;
 
@@ -775,10 +776,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
        /* function call */
        case BPF_JMP | BPF_CALL:
-               int ret;
-               u64 func_addr;
-               bool func_addr_fixed;
-
                mark_call(ctx);
                ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
                                            &func_addr, &func_addr_fixed);
@@ -811,8 +808,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
-               u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
-
                move_imm(ctx, dst, imm64, is32);
                return 1;
 
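
A side note on why the declarations move (illustration, not from the patch): build_insn() and emit_atomic() above hoist the per-case locals (r0, ret, func_addr, func_addr_fixed, imm64, and t7 renamed to tm) to the top of the function because, prior to C23, a declaration cannot directly follow a case label — a label must be attached to a statement — so some compilers reject the original form.

int classify(int code)
{
	int result;

	switch (code) {
	case 1:
		/* int tmp = code * 2;  <-- rejected by some compilers: a declaration
		 * may not directly follow a label.  Hoist it (as the JIT patch does)... */
		result = code * 2;
		break;
	case 2: {
		/* ...or open a block so the declaration follows '{', not the label. */
		int tmp = code + 40;

		result = tmp;
		break;
	}
	default:
		result = 0;
		break;
	}
	return result;
}
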
index fab8332..751921f 100644 (file)
@@ -32,6 +32,11 @@ static inline void arch_enter_lazy_mmu_mode(void)
 
        if (radix_enabled())
                return;
+       /*
+        * apply_to_page_range can call us with preempt enabled when
+        * operating on kernel page tables.
+        */
+       preempt_disable();
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        batch->active = 1;
 }
@@ -47,6 +52,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
        if (batch->index)
                __flush_tlb_pending(batch);
        batch->active = 0;
+       preempt_enable();
 }
 
 #define arch_flush_lazy_mmu_mode()      do {} while (0)
index 930e360..2f68fb2 100644 (file)
@@ -813,6 +813,13 @@ kernel_dbg_exc:
        EXCEPTION_COMMON(0x260)
        CHECK_NAPPING()
        addi    r3,r1,STACK_FRAME_OVERHEAD
+       /*
+        * XXX: Returning from performance_monitor_exception taken as a
+        * soft-NMI (Linux irqs disabled) may be risky to use interrupt_return
+        * and could cause bugs in return or elsewhere. That case should just
+        * restore registers and return. There is a workaround for one known
+        * problem in interrupt_exit_kernel_prepare().
+        */
        bl      performance_monitor_exception
        b       interrupt_return
 
index 5381a43..651c36b 100644 (file)
@@ -2357,9 +2357,21 @@ EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
 EXC_COMMON_BEGIN(performance_monitor_common)
        GEN_COMMON performance_monitor
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      performance_monitor_exception
+       lbz     r4,PACAIRQSOFTMASK(r13)
+       cmpdi   r4,IRQS_ENABLED
+       bne     1f
+       bl      performance_monitor_exception_async
        b       interrupt_return_srr
+1:
+       bl      performance_monitor_exception_nmi
+       /* Clear MSR_RI before setting SRR0 and SRR1. */
+       li      r9,0
+       mtmsrd  r9,1
 
+       kuap_kernel_restore r9, r10
+
+       EXCEPTION_RESTORE_REGS hsrr=0
+       RFI_TO_KERNEL
 
 /**
  * Interrupt 0xf20 - Vector Unavailable Interrupt.
index f9db0a1..fc6631a 100644 (file)
@@ -374,10 +374,18 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
        if (regs_is_unrecoverable(regs))
                unrecoverable_exception(regs);
        /*
-        * CT_WARN_ON comes here via program_check_exception,
-        * so avoid recursion.
+        * CT_WARN_ON comes here via program_check_exception, so avoid
+        * recursion.
+        *
+        * Skip the assertion on PMIs on 64e to work around a problem caused
+        * by NMI PMIs incorrectly taking this interrupt return path, it's
+        * possible for this to hit after interrupt exit to user switches
+        * context to user. See also the comment in the performance monitor
+        * handler in exceptions-64e.S
         */
-       if (TRAP(regs) != INTERRUPT_PROGRAM)
+       if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
+           TRAP(regs) != INTERRUPT_PROGRAM &&
+           TRAP(regs) != INTERRUPT_PERFMON)
                CT_WARN_ON(ct_state() == CONTEXT_USER);
 
        kuap = kuap_get_and_assert_locked();
index 978a173..a019ed6 100644 (file)
@@ -532,15 +532,24 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
         * Returning to soft-disabled context.
         * Check if a MUST_HARD_MASK interrupt has become pending, in which
         * case we need to disable MSR[EE] in the return context.
+        *
+        * The MSR[EE] check catches among other things the short incoherency
+        * in hard_irq_disable() between clearing MSR[EE] and setting
+        * PACA_IRQ_HARD_DIS.
         */
        ld      r12,_MSR(r1)
        andi.   r10,r12,MSR_EE
        beq     .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
        lbz     r11,PACAIRQHAPPENED(r13)
        andi.   r10,r11,PACA_IRQ_MUST_HARD_MASK
-       beq     .Lfast_kernel_interrupt_return_\srr\() // No HARD_MASK pending
+       bne     1f // HARD_MASK is pending
+       // No HARD_MASK pending, clear possible HARD_DIS set by interrupt
+       andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
+       stb     r11,PACAIRQHAPPENED(r13)
+       b       .Lfast_kernel_interrupt_return_\srr\()
+
 
-       /* Must clear MSR_EE from _MSR */
+1:     /* Must clear MSR_EE from _MSR */
 #ifdef CONFIG_PPC_BOOK3S
        li      r10,0
        /* Clear valid before changing _MSR */
index 61cdd78..a9f57da 100644 (file)
@@ -51,6 +51,7 @@ config KVM_BOOK3S_HV_POSSIBLE
 config KVM_BOOK3S_32
        tristate "KVM support for PowerPC book3s_32 processors"
        depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+       depends on !CONTEXT_TRACKING_USER
        select KVM
        select KVM_BOOK3S_32_HANDLER
        select KVM_BOOK3S_PR_POSSIBLE
@@ -105,6 +106,7 @@ config KVM_BOOK3S_64_HV
 config KVM_BOOK3S_64_PR
        tristate "KVM support without using hypervisor mode in host"
        depends on KVM_BOOK3S_64
+       depends on !CONTEXT_TRACKING_USER
        select KVM_BOOK3S_PR_POSSIBLE
        help
          Support running guest kernels in virtual machines on processors
@@ -190,6 +192,7 @@ config KVM_EXIT_TIMING
 config KVM_E500V2
        bool "KVM support for PowerPC E500v2 processors"
        depends on PPC_E500 && !PPC_E500MC
+       depends on !CONTEXT_TRACKING_USER
        select KVM
        select KVM_MMIO
        select MMU_NOTIFIER
@@ -205,6 +208,7 @@ config KVM_E500V2
 config KVM_E500MC
        bool "KVM support for PowerPC E500MC/E5500/E6500 processors"
        depends on PPC_E500MC
+       depends on !CONTEXT_TRACKING_USER
        select KVM
        select KVM_MMIO
        select KVM_BOOKE_HV
index f76a502..d491da8 100644 (file)
@@ -36,7 +36,17 @@ int exit_vmx_usercopy(void)
 {
        disable_kernel_altivec();
        pagefault_enable();
-       preempt_enable();
+       preempt_enable_no_resched();
+       /*
+        * Must never explicitly call schedule (including preempt_enable())
+        * while in a kuap-unlocked user copy, because the AMR register will
+        * not be saved and restored across context switch. However preempt
+        * kernels need to be preempted as soon as possible if need_resched is
+        * set and we are preemptible. The hack here is to schedule a
+        * decrementer to fire here and reschedule for us if necessary.
+        */
+       if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
+               set_dec(1);
        return 0;
 }
 
index 623a7b7..9342e79 100644 (file)
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map hpte_lock_map =
+       STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);
+
+static void acquire_hpte_lock(void)
+{
+       lock_map_acquire(&hpte_lock_map);
+}
+
+static void release_hpte_lock(void)
+{
+       lock_map_release(&hpte_lock_map);
+}
+#else
+static void acquire_hpte_lock(void)
+{
+}
+
+static void release_hpte_lock(void)
+{
+}
+#endif
+
 static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
                                                int apsize, int ssize)
 {
@@ -220,6 +243,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 {
        unsigned long *word = (unsigned long *)&hptep->v;
 
+       acquire_hpte_lock();
        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
@@ -234,6 +258,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
        unsigned long *word = (unsigned long *)&hptep->v;
 
+       release_hpte_lock();
        clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
@@ -243,8 +268,11 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 {
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
+       unsigned long flags;
        int i;
 
+       local_irq_save(flags);
+
        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
@@ -263,8 +291,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                hptep++;
        }
 
-       if (i == HPTES_PER_GROUP)
+       if (i == HPTES_PER_GROUP) {
+               local_irq_restore(flags);
                return -1;
+       }
 
        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
@@ -286,10 +316,13 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
+       release_hpte_lock();
        hptep->v = cpu_to_be64(hpte_v);
 
        __asm__ __volatile__ ("ptesync" : : : "memory");
 
+       local_irq_restore(flags);
+
        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
 }
 
@@ -327,6 +360,7 @@ static long native_hpte_remove(unsigned long hpte_group)
                return -1;
 
        /* Invalidate the hpte. NOTE: this also unlocks it */
+       release_hpte_lock();
        hptep->v = 0;
 
        return i;
@@ -339,6 +373,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;
+       unsigned long irqflags;
+
+       local_irq_save(irqflags);
 
        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 
@@ -382,6 +419,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);
 
+       local_irq_restore(irqflags);
+
        return ret;
 }
 
@@ -445,6 +484,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);
@@ -463,6 +505,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
         * actual page size will be same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
+
+       local_irq_restore(flags);
 }
 
 /*
@@ -476,6 +520,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);
@@ -493,6 +540,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 
        /* Invalidate the TLB */
        tlbie(vpn, psize, psize, ssize, 0);
+
+       local_irq_restore(flags);
+
        return 0;
 }
 
@@ -517,10 +567,11 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);
 
-               if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+               if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                        /* Invalidate the hpte. NOTE: this also unlocks it */
+                       release_hpte_lock();
                        hptep->v = 0;
-               else
+               } else
                        native_unlock_hpte(hptep);
        }
        /*
@@ -580,10 +631,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
                        hpte_v = hpte_get_old_v(hptep);
 
                        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
-                               /*
-                                * Invalidate the hpte. NOTE: this also unlocks it
-                                */
-
+                               /* Invalidate the hpte. NOTE: this also unlocks it */
+                               release_hpte_lock();
                                hptep->v = 0;
                        } else
                                native_unlock_hpte(hptep);
@@ -765,8 +814,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 
                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
-                       else
+                       else {
+                               release_hpte_lock();
                                hptep->v = 0;
+                       }
 
                } pte_iterate_hashed_end();
        }
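
A kernel-style sketch of the annotation pattern used above (illustration only; the demo_* names are made up): because the HPTE lock is an open-coded bit in the HPTE word rather than a spinlock_t, lockdep can only track it through explicit lock_map_acquire()/lock_map_release() calls around the bit operations, which is what acquire_hpte_lock()/release_hpte_lock() add.

#include <linux/lockdep.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

static struct lockdep_map demo_bitlock_map =
	STATIC_LOCKDEP_MAP_INIT("demo_bitlock", &demo_bitlock_map);

static void demo_bit_lock(unsigned long *word)
{
	lock_map_acquire(&demo_bitlock_map);	/* tell lockdep the lock is now held */
	while (test_and_set_bit_lock(0, word))
		cpu_relax();
}

static void demo_bit_unlock(unsigned long *word)
{
	lock_map_release(&demo_bitlock_map);	/* pairs with the acquire above */
	clear_bit_unlock(0, word);
}
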
index 747492e..51f4898 100644 (file)
@@ -404,7 +404,8 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
 
 struct change_memory_parms {
        unsigned long start, end, newpp;
-       unsigned int step, nr_cpus, master_cpu;
+       unsigned int step, nr_cpus;
+       atomic_t master_cpu;
        atomic_t cpu_counter;
 };
 
@@ -478,7 +479,8 @@ static int change_memory_range_fn(void *data)
 {
        struct change_memory_parms *parms = data;
 
-       if (parms->master_cpu != smp_processor_id())
+       // First CPU goes through, all others wait.
+       if (atomic_xchg(&parms->master_cpu, 1) == 1)
                return chmem_secondary_loop(parms);
 
        // Wait for all but one CPU (this one) to call-in
@@ -516,7 +518,7 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
                chmem_parms.end = end;
                chmem_parms.step = step;
                chmem_parms.newpp = newpp;
-               chmem_parms.master_cpu = smp_processor_id();
+               atomic_set(&chmem_parms.master_cpu, 0);
 
                cpus_read_lock();
 
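
On the election above (illustration, not part of the patch): the first CPU through atomic_xchg() sees the old value 0 and becomes the master, every later CPU sees 1 and drops into chmem_secondary_loop(). A standalone C11 sketch of the same primitive:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int master = 0;	/* the patch starts from atomic_set(&parms->master_cpu, 0) */

static const char *role(void)
{
	/* atomic_exchange() returns the previous value, like the kernel's atomic_xchg(). */
	return atomic_exchange(&master, 1) == 0 ? "master" : "secondary";
}

int main(void)
{
	printf("first caller:  %s\n", role());	/* master */
	printf("second caller: %s\n", role());	/* secondary */
	printf("third caller:  %s\n", role());	/* secondary */
	return 0;
}
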
index df008ed..6df4c6d 100644 (file)
@@ -1981,7 +1981,7 @@ repeat:
 }
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static DEFINE_SPINLOCK(linear_map_hash_lock);
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
@@ -2005,10 +2005,10 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
                                    mmu_linear_psize, mmu_kernel_ssize);
 
        BUG_ON (ret < 0);
-       spin_lock(&linear_map_hash_lock);
+       raw_spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
-       spin_unlock(&linear_map_hash_lock);
+       raw_spin_unlock(&linear_map_hash_lock);
 }
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2018,14 +2018,14 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
        unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
 
        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-       spin_lock(&linear_map_hash_lock);
+       raw_spin_lock(&linear_map_hash_lock);
        if (!(linear_map_hash_slots[lmi] & 0x80)) {
-               spin_unlock(&linear_map_hash_lock);
+               raw_spin_unlock(&linear_map_hash_lock);
                return;
        }
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
-       spin_unlock(&linear_map_hash_lock);
+       raw_spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
index 507dc0b..63fd925 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/drmem.h>
 
 #include "pseries.h"
+#include "vas.h"       /* pseries_vas_dlpar_cpu() */
 
 /*
  * This isn't a module but we expose that to userspace
@@ -748,6 +749,16 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
                        return -EINVAL;
 
                retval = update_ppp(new_entitled_ptr, NULL);
+
+               if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
+                       /*
+                        * The hypervisor assigns VAS resources based
+                        * on entitled capacity for shared mode.
+                        * Reconfig VAS windows based on DLPAR CPU events.
+                        */
+                       if (pseries_vas_dlpar_cpu() != 0)
+                               retval = H_HARDWARE;
+               }
        } else if (!strcmp(kbuf, "capacity_weight")) {
                char *endp;
                *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
index 0e0524c..4ad6e51 100644 (file)
@@ -200,17 +200,42 @@ static irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
        struct vas_user_win_ref *tsk_ref;
        int rc;
 
-       rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
-       if (!rc) {
-               tsk_ref = &txwin->vas_win.task_ref;
-               vas_dump_crb(&crb);
-               vas_update_csb(&crb, tsk_ref);
+       while (atomic_read(&txwin->pending_faults)) {
+               rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+               if (!rc) {
+                       tsk_ref = &txwin->vas_win.task_ref;
+                       vas_dump_crb(&crb);
+                       vas_update_csb(&crb, tsk_ref);
+               }
+               atomic_dec(&txwin->pending_faults);
        }
 
        return IRQ_HANDLED;
 }
 
 /*
+ * irq_default_primary_handler() can be used only with IRQF_ONESHOT
+ * which disables IRQ before executing the thread handler and enables
+ * it after. But this disabling interrupt sets the VAS IRQ OFF
+ * state in the hypervisor. If the NX generates fault interrupt
+ * during this window, the hypervisor will not deliver this
+ * interrupt to the LPAR. So use VAS specific IRQ handler instead
+ * of calling the default primary handler.
+ */
+static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
+{
+       struct pseries_vas_window *txwin = data;
+
+       /*
+        * The thread handler will process this interrupt if it is
+        * already running.
+        */
+       atomic_inc(&txwin->pending_faults);
+
+       return IRQ_WAKE_THREAD;
+}
+
+/*
  * Allocate window and setup IRQ mapping.
  */
 static int allocate_setup_window(struct pseries_vas_window *txwin,
@@ -240,8 +265,9 @@ static int allocate_setup_window(struct pseries_vas_window *txwin,
                goto out_irq;
        }
 
-       rc = request_threaded_irq(txwin->fault_virq, NULL,
-                                 pseries_vas_fault_thread_fn, IRQF_ONESHOT,
+       rc = request_threaded_irq(txwin->fault_virq,
+                                 pseries_vas_irq_handler,
+                                 pseries_vas_fault_thread_fn, 0,
                                  txwin->name, txwin);
        if (rc) {
                pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
@@ -826,6 +852,25 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds)
        mutex_unlock(&vas_pseries_mutex);
        return rc;
 }
+
+int pseries_vas_dlpar_cpu(void)
+{
+       int new_nr_creds, rc;
+
+       rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+                                     vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+                                     (u64)virt_to_phys(&hv_cop_caps));
+       if (!rc) {
+               new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+               rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, new_nr_creds);
+       }
+
+       if (rc)
+               pr_err("Failed reconfig VAS capabilities with DLPAR\n");
+
+       return rc;
+}
+
 /*
  * Total number of default credits available (target_credits)
  * in LPAR depends on number of cores configured. It varies based on
@@ -840,7 +885,15 @@ static int pseries_vas_notifier(struct notifier_block *nb,
        struct of_reconfig_data *rd = data;
        struct device_node *dn = rd->dn;
        const __be32 *intserv = NULL;
-       int new_nr_creds, len, rc = 0;
+       int len;
+
+       /*
+        * For shared CPU partition, the hypervisor assigns total credits
+        * based on entitled core capacity. So updating VAS windows will
+        * be called from lparcfg_write().
+        */
+       if (is_shared_processor())
+               return NOTIFY_OK;
 
        if ((action == OF_RECONFIG_ATTACH_NODE) ||
                (action == OF_RECONFIG_DETACH_NODE))
@@ -852,19 +905,7 @@ static int pseries_vas_notifier(struct notifier_block *nb,
        if (!intserv)
                return NOTIFY_OK;
 
-       rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
-                                       vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
-                                       (u64)virt_to_phys(&hv_cop_caps));
-       if (!rc) {
-               new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
-               rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE,
-                                               new_nr_creds);
-       }
-
-       if (rc)
-               pr_err("Failed reconfig VAS capabilities with DLPAR\n");
-
-       return rc;
+       return pseries_vas_dlpar_cpu();
 }
 
 static struct notifier_block pseries_vas_nb = {
index 333ffa2..7115043 100644 (file)
@@ -132,6 +132,7 @@ struct pseries_vas_window {
        u64 flags;
        char *name;
        int fault_virq;
+       atomic_t pending_faults; /* Number of pending faults */
 };
 
 int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
@@ -140,10 +141,15 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);
 
 #ifdef CONFIG_PPC_VAS
 int vas_migration_handler(int action);
+int pseries_vas_dlpar_cpu(void);
 #else
 static inline int vas_migration_handler(int action)
 {
        return 0;
 }
+static inline int pseries_vas_dlpar_cpu(void)
+{
+       return 0;
+}
 #endif
 #endif /* _VAS_H */
index 6b48a3a..fa78595 100644 (file)
@@ -411,14 +411,16 @@ config RISCV_ISA_SVPBMT
 
           If you don't know what to do here, say Y.
 
-config CC_HAS_ZICBOM
+config TOOLCHAIN_HAS_ZICBOM
        bool
-       default y if 64BIT && $(cc-option,-mabi=lp64 -march=rv64ima_zicbom)
-       default y if 32BIT && $(cc-option,-mabi=ilp32 -march=rv32ima_zicbom)
+       default y
+       depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zicbom)
+       depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zicbom)
+       depends on LLD_VERSION >= 150000 || LD_VERSION >= 23800
 
 config RISCV_ISA_ZICBOM
        bool "Zicbom extension support for non-coherent DMA operation"
-       depends on CC_HAS_ZICBOM
+       depends on TOOLCHAIN_HAS_ZICBOM
        depends on !XIP_KERNEL && MMU
        select RISCV_DMA_NONCOHERENT
        select RISCV_ALTERNATIVE
@@ -433,6 +435,13 @@ config RISCV_ISA_ZICBOM
 
           If you don't know what to do here, say Y.
 
+config TOOLCHAIN_HAS_ZIHINTPAUSE
+       bool
+       default y
+       depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause)
+       depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
+       depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
+
 config FPU
        bool "FPU support"
        default y
index 1c8ec65..0d13b59 100644 (file)
@@ -59,12 +59,10 @@ toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zi
 riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
 
 # Check if the toolchain supports Zicbom extension
-toolchain-supports-zicbom := $(call cc-option-yn, -march=$(riscv-march-y)_zicbom)
-riscv-march-$(toolchain-supports-zicbom) := $(riscv-march-y)_zicbom
+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOM) := $(riscv-march-y)_zicbom
 
 # Check if the toolchain supports Zihintpause extension
-toolchain-supports-zihintpause := $(call cc-option-yn, -march=$(riscv-march-y)_zihintpause)
-riscv-march-$(toolchain-supports-zihintpause) := $(riscv-march-y)_zihintpause
+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
 
 KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
 KBUILD_AFLAGS += -march=$(riscv-march-y)
index 8a5c246..f6fbe70 100644 (file)
@@ -42,16 +42,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
-/*
- * The T-Head CMO errata internally probe the CBOM block size, but otherwise
- * don't depend on Zicbom.
- */
 extern unsigned int riscv_cbom_block_size;
-#ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
-#else
-static inline void riscv_init_cbom_blocksize(void) { }
-#endif
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 void riscv_noncoherent_supported(void);
index 38af2ec..6d58bbb 100644 (file)
@@ -14,8 +14,8 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key,
-                                              bool branch)
+static __always_inline bool arch_static_branch(struct static_key * const key,
+                                              const bool branch)
 {
        asm_volatile_goto(
                "       .option push                            \n\t"
@@ -35,8 +35,8 @@ label:
        return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key,
-                                                   bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+                                                   const bool branch)
 {
        asm_volatile_goto(
                "       .option push                            \n\t"
index 0d8fdb8..82f7260 100644 (file)
@@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 
index 1e4f8b4..fa70cfe 100644 (file)
@@ -21,7 +21,7 @@ static inline void cpu_relax(void)
                 * Reduce instruction retirement.
                 * This assumes the PC changes.
                 */
-#ifdef __riscv_zihintpause
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
                __asm__ __volatile__ ("pause");
 #else
                /* Encoding of the pause instruction */
index fa427bd..852eccc 100644 (file)
@@ -213,6 +213,9 @@ static void print_mmu(struct seq_file *f)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
+       if (*pos == nr_cpu_ids)
+               return NULL;
+
        *pos = cpumask_next(*pos - 1, cpu_online_mask);
        if ((*pos) < nr_cpu_ids)
                return (void *)(uintptr_t)(1 + *pos);
index a032c4f..71ebbc4 100644 (file)
@@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
                                clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
                }
        }
+
+       /* Sync-up timer CSRs */
+       kvm_riscv_vcpu_timer_sync(vcpu);
 }
 
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
index 185f238..ad34519 100644 (file)
@@ -320,20 +320,33 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
        kvm_riscv_vcpu_timer_unblocking(vcpu);
 }
 
-void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 
        if (!t->sstc_enabled)
                return;
 
-       t = &vcpu->arch.timer;
 #if defined(CONFIG_32BIT)
        t->next_cycles = csr_read(CSR_VSTIMECMP);
        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
 #else
        t->next_cycles = csr_read(CSR_VSTIMECMP);
 #endif
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+       if (!t->sstc_enabled)
+               return;
+
+       /*
+        * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+        * upon every VM exit so no need to save here.
+        */
+
        /* timer should be enabled for the remaining operations */
        if (unlikely(!t->init_done))
                return;
index 6cb7d96..57b40a3 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
@@ -86,3 +87,40 @@ void flush_icache_pte(pte_t pte)
                flush_icache_all();
 }
 #endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+       struct device_node *node;
+       unsigned long cbom_hartid;
+       u32 val, probed_block_size;
+       int ret;
+
+       probed_block_size = 0;
+       for_each_of_cpu_node(node) {
+               unsigned long hartid;
+
+               ret = riscv_of_processor_hartid(node, &hartid);
+               if (ret)
+                       continue;
+
+               /* set block-size for cbom extension if available */
+               ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+               if (ret)
+                       continue;
+
+               if (!probed_block_size) {
+                       probed_block_size = val;
+                       cbom_hartid = hartid;
+               } else {
+                       if (probed_block_size != val)
+                               pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+                                       cbom_hartid, hartid);
+               }
+       }
+
+       if (probed_block_size)
+               riscv_cbom_block_size = probed_block_size;
+}
index b0add98..d919efa 100644 (file)
@@ -8,13 +8,8 @@
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
 #include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <asm/cacheflush.h>
 
-unsigned int riscv_cbom_block_size;
-EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
-
 static bool noncoherent_supported;
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
@@ -77,42 +72,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        dev->dma_coherent = coherent;
 }
 
-#ifdef CONFIG_RISCV_ISA_ZICBOM
-void riscv_init_cbom_blocksize(void)
-{
-       struct device_node *node;
-       unsigned long cbom_hartid;
-       u32 val, probed_block_size;
-       int ret;
-
-       probed_block_size = 0;
-       for_each_of_cpu_node(node) {
-               unsigned long hartid;
-
-               ret = riscv_of_processor_hartid(node, &hartid);
-               if (ret)
-                       continue;
-
-               /* set block-size for cbom extension if available */
-               ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-               if (ret)
-                       continue;
-
-               if (!probed_block_size) {
-                       probed_block_size = val;
-                       cbom_hartid = hartid;
-               } else {
-                       if (probed_block_size != val)
-                               pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-                                       cbom_hartid, hartid);
-               }
-       }
-
-       if (probed_block_size)
-               riscv_cbom_block_size = probed_block_size;
-}
-#endif
-
 void riscv_noncoherent_supported(void)
 {
        WARN(!riscv_cbom_block_size,
index a22e418..e122670 100644 (file)
@@ -113,6 +113,8 @@ static void __init kasan_populate_pud(pgd_t *pgd,
                base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
        } else if (pgd_none(*pgd)) {
                base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+               memcpy(base_pud, (void *)kasan_early_shadow_pud,
+                       sizeof(pud_t) * PTRS_PER_PUD);
        } else {
                base_pud = (pud_t *)pgd_page_vaddr(*pgd);
                if (base_pud == lm_alias(kasan_early_shadow_pud)) {
@@ -173,8 +175,11 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
                base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
        } else {
                base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-               if (base_p4d == lm_alias(kasan_early_shadow_p4d))
+               if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
                        base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
+                       memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
+                               sizeof(p4d_t) * PTRS_PER_P4D);
+               }
        }
 
        p4dp = base_p4d + p4d_index(vaddr);
index af5c686..fa9d33b 100644 (file)
@@ -102,8 +102,17 @@ SECTIONS
                _compressed_start = .;
                *(.vmlinux.bin.compressed)
                _compressed_end = .;
-               FILL(0xff);
-               . = ALIGN(4096);
+       }
+
+#define SB_TRAILER_SIZE 32
+       /* Trailer needed for Secure Boot */
+       . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
+       . = ALIGN(4096) - SB_TRAILER_SIZE;
+       .sb.trailer : {
+               QUAD(0)
+               QUAD(0)
+               QUAD(0)
+               QUAD(0x000000207a49504c)
        }
        _end = .;
 
index e08c882..eaeaeb3 100644 (file)
@@ -17,7 +17,8 @@
                "3: jl    1b\n"                                         \
                "   lhi   %0,0\n"                                       \
                "4: sacf  768\n"                                        \
-               EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)         \
+               EX_TABLE(0b,4b) EX_TABLE(1b,4b)                         \
+               EX_TABLE(2b,4b) EX_TABLE(3b,4b)                         \
                : "=d" (ret), "=&d" (oldval), "=&d" (newval),           \
                  "=m" (*uaddr)                                         \
                : "0" (-EFAULT), "d" (oparg), "a" (uaddr),              \
index d5c7c1e..74b53c5 100644 (file)
@@ -459,6 +459,7 @@ static int paiext_push_sample(void)
                raw.frag.data = cpump->save;
                raw.size = raw.frag.size;
                data.raw = &raw;
+               data.sample_flags |= PERF_SAMPLE_RAW;
        }
 
        overflow = perf_event_overflow(event, &data, &regs);
index 58033df..720036f 100644 (file)
@@ -157,7 +157,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
        asm volatile(
                "   lr    0,%[spec]\n"
                "0: mvcos 0(%1),0(%4),%0\n"
-               "   jz    4f\n"
+               "6: jz    4f\n"
                "1: algr  %0,%2\n"
                "   slgr  %1,%2\n"
                "   j     0b\n"
@@ -167,11 +167,11 @@ unsigned long __clear_user(void __user *to, unsigned long size)
                "   clgr  %0,%3\n"      /* copy crosses next page boundary? */
                "   jnh   5f\n"
                "3: mvcos 0(%1),0(%4),%3\n"
-               "   slgr  %0,%3\n"
+               "7: slgr  %0,%3\n"
                "   j     5f\n"
                "4: slgr  %0,%0\n"
                "5:\n"
-               EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+               EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
                : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
                : "a" (empty_zero_page), [spec] "d" (spec.val)
                : "cc", "memory", "0");
index 080c886..5880893 100644 (file)
@@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser(
        asm volatile (
                "       sacf    256\n"
                "0:     llgc    %[tmp],0(%[src])\n"
-               "       sllg    %[val],%[val],8\n"
+               "4:     sllg    %[val],%[val],8\n"
                "       aghi    %[src],1\n"
                "       ogr     %[val],%[tmp]\n"
                "       brctg   %[cnt],0b\n"
@@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser(
                "2:     ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                "3:     sacf    768\n"
-               EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+               EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
                :
                [src] "+a" (src), [cnt] "+d" (cnt),
                [val] "+d" (val), [tmp] "=d" (tmp),
@@ -215,10 +215,10 @@ static inline int __pcilg_mio_inuser(
                "2:     ahi     %[shift],-8\n"
                "       srlg    %[tmp],%[val],0(%[shift])\n"
                "3:     stc     %[tmp],0(%[dst])\n"
-               "       aghi    %[dst],1\n"
+               "5:     aghi    %[dst],1\n"
                "       brctg   %[cnt],2b\n"
                "4:     sacf    768\n"
-               EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
+               EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
                :
                [ioaddr_len] "+&d" (ioaddr_len.pair),
                [cc] "+d" (cc), [val] "=d" (val),
index 6d1879e..67745ce 100644 (file)
@@ -1973,7 +1973,6 @@ config EFI
 config EFI_STUB
        bool "EFI stub support"
        depends on EFI
-       depends on $(cc-option,-mabi=ms) || X86_32
        select RELOCATABLE
        help
          This kernel feature allows a bzImage to be loaded directly
index b7664d0..8fa58b0 100644 (file)
 #include <asm/cpu_device_id.h>
 #include <asm/simd.h>
 
+#define POLYVAL_ALIGN  16
+#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
+#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)
 #define NUM_KEY_POWERS 8
 
 struct polyval_tfm_ctx {
        /*
         * These powers must be in the order h^8, ..., h^1.
         */
-       u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
+       u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR;
 };
 
 struct polyval_desc_ctx {
@@ -45,6 +49,11 @@ asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
        const u8 *in, size_t nblocks, u8 *accumulator);
 asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2);
 
+static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
+{
+       return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN);
+}
+
 static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
        const u8 *in, size_t nblocks, u8 *accumulator)
 {
@@ -72,7 +81,7 @@ static void internal_polyval_mul(u8 *op1, const u8 *op2)
 static int polyval_x86_setkey(struct crypto_shash *tfm,
                        const u8 *key, unsigned int keylen)
 {
-       struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+       struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm);
        int i;
 
        if (keylen != POLYVAL_BLOCK_SIZE)
@@ -102,7 +111,7 @@ static int polyval_x86_update(struct shash_desc *desc,
                         const u8 *src, unsigned int srclen)
 {
        struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-       const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+       const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
        u8 *pos;
        unsigned int nblocks;
        unsigned int n;
@@ -143,7 +152,7 @@ static int polyval_x86_update(struct shash_desc *desc,
 static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
 {
        struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-       const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+       const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
 
        if (dctx->bytes) {
                internal_polyval_mul(dctx->buffer,
@@ -167,7 +176,7 @@ static struct shash_alg polyval_alg = {
                .cra_driver_name        = "polyval-clmulni",
                .cra_priority           = 200,
                .cra_blocksize          = POLYVAL_BLOCK_SIZE,
-               .cra_ctxsize            = sizeof(struct polyval_tfm_ctx),
+               .cra_ctxsize            = POLYVAL_CTX_SIZE,
                .cra_module             = THIS_MODULE,
        },
 };
index 3271735..4cb710e 100644 (file)
@@ -801,7 +801,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
        /* Extension Memory */
        if (ibs_caps & IBS_CAPS_ZEN4 &&
            ibs_data_src == IBS_DATA_SRC_EXT_EXT_MEM) {
-               data_src->mem_lvl_num = PERF_MEM_LVLNUM_EXTN_MEM;
+               data_src->mem_lvl_num = PERF_MEM_LVLNUM_CXL;
                if (op_data2->rmt_node) {
                        data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
                        /* IBS doesn't provide Remote socket detail */
index 4fce1a4..8259d72 100644 (file)
@@ -1596,7 +1596,7 @@ void __init intel_pmu_arch_lbr_init(void)
        return;
 
 clear_arch_lbr:
-       clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
+       setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
 }
 
 /**
index 77e3a47..fea544e 100644 (file)
@@ -806,7 +806,11 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,         &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &model_spr),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &model_skl),
        {},
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
index 0bef44d..2fd52b6 100644 (file)
@@ -25,8 +25,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
 {
        u64 start = rmrr->base_address;
        u64 end = rmrr->end_address + 1;
+       int entry_type;
 
-       if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+       entry_type = e820__get_entry_type(start, end);
+       if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
                return 0;
 
        pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
index 3b87d88..888731c 100644 (file)
 /* Even with __builtin_ the compiler may decide to use the out of line
    function. */
 
+#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
+#include <linux/kmsan_string.h>
+#endif
+
 #define __HAVE_ARCH_MEMCPY 1
-#if defined(__SANITIZE_MEMORY__)
+#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
 #undef memcpy
-void *__msan_memcpy(void *dst, const void *src, size_t size);
 #define memcpy __msan_memcpy
 #else
 extern void *memcpy(void *to, const void *from, size_t len);
@@ -21,7 +24,7 @@ extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #define __HAVE_ARCH_MEMSET
-#if defined(__SANITIZE_MEMORY__)
+#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
 extern void *__msan_memset(void *s, int c, size_t n);
 #undef memset
 #define memset __msan_memset
@@ -67,7 +70,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
 }
 
 #define __HAVE_ARCH_MEMMOVE
-#if defined(__SANITIZE_MEMORY__)
+#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
 #undef memmove
 void *__msan_memmove(void *dest, const void *src, size_t len);
 #define memmove __msan_memmove
index 8bc614c..1cc756e 100644 (file)
@@ -254,24 +254,25 @@ extern void __put_user_nocheck_8(void);
 #define __put_user_size(x, ptr, size, label)                           \
 do {                                                                   \
        __typeof__(*(ptr)) __x = (x); /* eval x once */                 \
-       __chk_user_ptr(ptr);                                            \
+       __typeof__(ptr) __ptr = (ptr); /* eval ptr once */              \
+       __chk_user_ptr(__ptr);                                          \
        switch (size) {                                                 \
        case 1:                                                         \
-               __put_user_goto(__x, ptr, "b", "iq", label);            \
+               __put_user_goto(__x, __ptr, "b", "iq", label);          \
                break;                                                  \
        case 2:                                                         \
-               __put_user_goto(__x, ptr, "w", "ir", label);            \
+               __put_user_goto(__x, __ptr, "w", "ir", label);          \
                break;                                                  \
        case 4:                                                         \
-               __put_user_goto(__x, ptr, "l", "ir", label);            \
+               __put_user_goto(__x, __ptr, "l", "ir", label);          \
                break;                                                  \
        case 8:                                                         \
-               __put_user_goto_u64(__x, ptr, label);                   \
+               __put_user_goto_u64(__x, __ptr, label);                 \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
-       instrument_put_user(__x, ptr, size);                            \
+       instrument_put_user(__x, __ptr, size);                          \
 } while (0)
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
index e7410e9..3a35dec 100644 (file)
@@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
                return ret;
 
        native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-       if (rev >= mc->hdr.patch_id)
+
+       /*
+        * Allow application of the same revision to pick up SMT-specific
+        * changes even if the revision of the other SMT thread is already
+        * up-to-date.
+        */
+       if (rev > mc->hdr.patch_id)
                return ret;
 
        if (!__apply_microcode_amd(mc)) {
@@ -528,8 +534,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 
        native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-       /* Check whether we have saved a new patch already: */
-       if (*new_rev && rev < mc->hdr.patch_id) {
+       /*
+        * Check whether a new patch has been saved already. Also, allow application of
+        * the same revision in order to pick up SMT-thread-specific configuration even
+        * if the sibling SMT thread already has an up-to-date revision.
+        */
+       if (*new_rev && rev <= mc->hdr.patch_id) {
                if (!__apply_microcode_amd(mc)) {
                        *new_rev = mc->hdr.patch_id;
                        return;
index de62b0b..3266ea3 100644 (file)
@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
                        .rid                    = RDT_RESOURCE_L3,
                        .name                   = "L3",
                        .cache_level            = 3,
-                       .cache = {
-                               .min_cbm_bits   = 1,
-                       },
                        .domains                = domain_init(RDT_RESOURCE_L3),
                        .parse_ctrlval          = parse_cbm,
                        .format_str             = "%d=%0*x",
@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
                        .rid                    = RDT_RESOURCE_L2,
                        .name                   = "L2",
                        .cache_level            = 2,
-                       .cache = {
-                               .min_cbm_bits   = 1,
-                       },
                        .domains                = domain_init(RDT_RESOURCE_L2),
                        .parse_ctrlval          = parse_cbm,
                        .format_str             = "%d=%0*x",
@@ -836,6 +830,7 @@ static __init void rdt_init_res_defs_intel(void)
                        r->cache.arch_has_sparse_bitmaps = false;
                        r->cache.arch_has_empty_bitmaps = false;
                        r->cache.arch_has_per_cpu_cfg = false;
+                       r->cache.min_cbm_bits = 1;
                } else if (r->rid == RDT_RESOURCE_MBA) {
                        hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
                        hw_res->msr_update = mba_wrmsr_intel;
@@ -856,6 +851,7 @@ static __init void rdt_init_res_defs_amd(void)
                        r->cache.arch_has_sparse_bitmaps = true;
                        r->cache.arch_has_empty_bitmaps = true;
                        r->cache.arch_has_per_cpu_cfg = true;
+                       r->cache.min_cbm_bits = 0;
                } else if (r->rid == RDT_RESOURCE_MBA) {
                        hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
                        hw_res->msr_update = mba_wrmsr_amd;
index 132a2de..5e868b6 100644 (file)
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
        unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;
        unsigned int die_select_mask, die_level_siblings;
+       unsigned int pkg_mask_width;
        bool die_level_present = false;
        int leaf;
 
@@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
        core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
        die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
-       die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+       pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 
        sub_index = 1;
-       do {
+       while (true) {
                cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
 
                /*
@@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
                        die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
                }
 
+               if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
+                       pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+               else
+                       break;
+
                sub_index++;
-       } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+       }
 
-       core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+       core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
        die_select_mask = (~(-1 << die_plus_mask_width)) >>
                                core_plus_mask_width;
 
@@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
        }
 
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-                               die_plus_mask_width);
+                               pkg_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
index 621f4b6..8946f89 100644 (file)
@@ -210,13 +210,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
        fpstate_reset(&current->thread.fpu);
 }
 
-static void __init fpu__init_init_fpstate(void)
-{
-       /* Bring init_fpstate size and features up to date */
-       init_fpstate.size               = fpu_kernel_cfg.max_size;
-       init_fpstate.xfeatures          = fpu_kernel_cfg.max_features;
-}
-
 /*
  * Called on the boot CPU once per system bootup, to set up the initial
  * FPU state that is later cloned into all processes:
@@ -236,5 +229,4 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
        fpu__init_system_xstate_size_legacy();
        fpu__init_system_xstate(fpu_kernel_cfg.max_size);
        fpu__init_task_struct_size();
-       fpu__init_init_fpstate();
 }
index c834015..59e543b 100644 (file)
@@ -360,7 +360,7 @@ static void __init setup_init_fpu_buf(void)
 
        print_xstate_features();
 
-       xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);
+       xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures);
 
        /*
         * Init all the features state with header.xfeatures being 0x0
@@ -678,20 +678,6 @@ static unsigned int __init get_xsave_size_user(void)
        return ebx;
 }
 
-/*
- * Will the runtime-enumerated 'xstate_size' fit in the init
- * task's statically-allocated buffer?
- */
-static bool __init is_supported_xstate_size(unsigned int test_xstate_size)
-{
-       if (test_xstate_size <= sizeof(init_fpstate.regs))
-               return true;
-
-       pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
-                       sizeof(init_fpstate.regs), test_xstate_size);
-       return false;
-}
-
 static int __init init_xstate_size(void)
 {
        /* Recompute the context size for enabled features: */
@@ -717,10 +703,6 @@ static int __init init_xstate_size(void)
        kernel_default_size =
                xstate_calculate_size(fpu_kernel_cfg.default_features, compacted);
 
-       /* Ensure we have the space to store all default enabled features. */
-       if (!is_supported_xstate_size(kernel_default_size))
-               return -EINVAL;
-
        if (!paranoid_xstate_size_valid(kernel_size))
                return -EINVAL;
 
@@ -875,6 +857,19 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
        update_regset_xstate_info(fpu_user_cfg.max_size,
                                  fpu_user_cfg.max_features);
 
+       /*
+        * init_fpstate excludes dynamic states as they are large but init
+        * state is zero.
+        */
+       init_fpstate.size               = fpu_kernel_cfg.default_size;
+       init_fpstate.xfeatures          = fpu_kernel_cfg.default_features;
+
+       if (init_fpstate.size > sizeof(init_fpstate.regs)) {
+               pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n",
+                       sizeof(init_fpstate.regs), init_fpstate.size);
+               goto out_disable;
+       }
+
        setup_init_fpu_buf();
 
        /*
@@ -1130,6 +1125,15 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
         */
        mask = fpstate->user_xfeatures;
 
+       /*
+        * Dynamic features are not present in init_fpstate. When they are
+        * in an all zeros init state, remove those from 'mask' to zero
+        * those features in the user buffer instead of retrieving them
+        * from init_fpstate.
+        */
+       if (fpu_state_size_dynamic())
+               mask &= (header.xfeatures | xinit->header.xcomp_bv);
+
        for_each_extended_xfeature(i, mask) {
                /*
                 * If there was a feature or alignment gap, zero the space
index dfeb227..2a4be92 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
 #include <asm/export.h>
 
        .endm
 
+SYM_TYPED_FUNC_START(ftrace_stub)
+       RET
+SYM_FUNC_END(ftrace_stub)
+
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+       RET
+SYM_FUNC_END(ftrace_stub_graph)
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 SYM_FUNC_START(__fentry__)
@@ -172,21 +181,10 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
         */
 SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
-
-       jmp ftrace_epilogue
+       RET
 SYM_FUNC_END(ftrace_caller);
 STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
 
-SYM_FUNC_START(ftrace_epilogue)
-/*
- * This is weak to keep gas from relaxing the jumps.
- */
-SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-       UNWIND_HINT_FUNC
-       ENDBR
-       RET
-SYM_FUNC_END(ftrace_epilogue)
-
 SYM_FUNC_START(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
        pushfq
@@ -262,14 +260,11 @@ SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
        popfq
 
        /*
-        * As this jmp to ftrace_epilogue can be a short jump
-        * it must not be copied into the trampoline.
-        * The trampoline will add the code to jump
-        * to the return.
+        * The trampoline will add the return.
         */
 SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
-       jmp ftrace_epilogue
+       RET
 
        /* Swap the flags with orig_rax */
 1:     movq MCOUNT_REG_SIZE(%rsp), %rdi
@@ -280,7 +275,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        /* Restore flags */
        popfq
        UNWIND_HINT_FUNC
-       jmp     ftrace_epilogue
+       RET
 
 SYM_FUNC_END(ftrace_regs_caller)
 STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
@@ -291,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
 SYM_FUNC_START(__fentry__)
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
-
-SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
-       ENDBR
        RET
 
 trace:
index 0ea57da..c059820 100644 (file)
@@ -713,7 +713,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        /* Otherwise, skip ahead to the user-specified starting frame: */
        while (!unwind_done(state) &&
               (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-                       state->sp < (unsigned long)first_frame))
+                       state->sp <= (unsigned long)first_frame))
                unwind_next_frame(state);
 
        return;
index 7065462..0810e93 100644 (file)
@@ -1133,11 +1133,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                        entry->eax = max(entry->eax, 0x80000021);
                break;
        case 0x80000001:
+               entry->ebx &= ~GENMASK(27, 16);
                cpuid_entry_override(entry, CPUID_8000_0001_EDX);
                cpuid_entry_override(entry, CPUID_8000_0001_ECX);
                break;
        case 0x80000006:
-               /* L2 cache and TLB: pass through host info. */
+               /* Drop reserved bits, pass host L2 cache and TLB info. */
+               entry->edx &= ~GENMASK(17, 16);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -1167,6 +1169,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                        g_phys_as = phys_as;
 
                entry->eax = g_phys_as | (virt_as << 8);
+               entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
                entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
                break;
@@ -1186,6 +1189,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
+               entry->eax &= GENMASK(2, 0);
+               entry->ebx = entry->ecx = entry->edx = 0;
+               break;
        case 0x8000001e:
                break;
        case 0x8000001F:
@@ -1193,7 +1199,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                } else {
                        cpuid_entry_override(entry, CPUID_8000_001F_EAX);
-
+                       /* Clear NumVMPL since KVM does not support VMPL.  */
+                       entry->ebx &= ~GENMASK(31, 12);
                        /*
                         * Enumerate '0' for "PA bits reduction", the adjusted
                         * MAXPHYADDR is enumerated directly (see 0x80000008).
index cfed36a..c139035 100644 (file)
@@ -158,11 +158,16 @@ out:
 static int kvm_mmu_rmaps_stat_open(struct inode *inode, struct file *file)
 {
        struct kvm *kvm = inode->i_private;
+       int r;
 
        if (!kvm_get_kvm_safe(kvm))
                return -ENOENT;
 
-       return single_open(file, kvm_mmu_rmaps_stat_show, kvm);
+       r = single_open(file, kvm_mmu_rmaps_stat_show, kvm);
+       if (r < 0)
+               kvm_put_kvm(kvm);
+
+       return r;
 }
 
 static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
index 3b27622..4a43261 100644 (file)
@@ -791,8 +791,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
                           ctxt->mode, linear);
 }
 
-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
-                            enum x86emul_mode mode)
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 {
        ulong linear;
        int rc;
@@ -802,41 +801,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
 
        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-       rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+       rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
 }
 
+static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
+{
+       u64 efer;
+       struct desc_struct cs;
+       u16 selector;
+       u32 base3;
+
+       ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+
+       if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
+               /* Real mode. cpu must not have long mode active */
+               if (efer & EFER_LMA)
+                       return X86EMUL_UNHANDLEABLE;
+               ctxt->mode = X86EMUL_MODE_REAL;
+               return X86EMUL_CONTINUE;
+       }
+
+       if (ctxt->eflags & X86_EFLAGS_VM) {
+               /* Protected/VM86 mode. cpu must not have long mode active */
+               if (efer & EFER_LMA)
+                       return X86EMUL_UNHANDLEABLE;
+               ctxt->mode = X86EMUL_MODE_VM86;
+               return X86EMUL_CONTINUE;
+       }
+
+       if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
+               return X86EMUL_UNHANDLEABLE;
+
+       if (efer & EFER_LMA) {
+               if (cs.l) {
+                       /* Proper long mode */
+                       ctxt->mode = X86EMUL_MODE_PROT64;
+               } else if (cs.d) {
+                       /* 32 bit compatibility mode */
+                       ctxt->mode = X86EMUL_MODE_PROT32;
+               } else {
+                       ctxt->mode = X86EMUL_MODE_PROT16;
+               }
+       } else {
+               /* Legacy 32 bit / 16 bit mode */
+               ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+       }
+
+       return X86EMUL_CONTINUE;
+}
+
 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
 {
-       return assign_eip(ctxt, dst, ctxt->mode);
+       return assign_eip(ctxt, dst);
 }
 
-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-                         const struct desc_struct *cs_desc)
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
 {
-       enum x86emul_mode mode = ctxt->mode;
-       int rc;
+       int rc = emulator_recalc_and_set_mode(ctxt);
 
-#ifdef CONFIG_X86_64
-       if (ctxt->mode >= X86EMUL_MODE_PROT16) {
-               if (cs_desc->l) {
-                       u64 efer = 0;
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
 
-                       ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-                       if (efer & EFER_LMA)
-                               mode = X86EMUL_MODE_PROT64;
-               } else
-                       mode = X86EMUL_MODE_PROT32; /* temporary value */
-       }
-#endif
-       if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
-               mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-       rc = assign_eip(ctxt, dst, mode);
-       if (rc == X86EMUL_CONTINUE)
-               ctxt->mode = mode;
-       return rc;
+       return assign_eip(ctxt, dst);
 }
 
 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -2172,7 +2201,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+       rc = assign_eip_far(ctxt, ctxt->src.val);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;
@@ -2250,7 +2279,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       rc = assign_eip_far(ctxt, eip, &new_desc);
+       rc = assign_eip_far(ctxt, eip);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;
@@ -2432,7 +2461,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
        ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
        ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
 
-       for (i = 0; i < NR_EMULATOR_GPRS; i++)
+       for (i = 0; i < 8; i++)
                *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
 
        val = GET_SMSTATE(u32, smstate, 0x7fcc);
@@ -2489,7 +2518,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
        u16 selector;
        int i, r;
 
-       for (i = 0; i < NR_EMULATOR_GPRS; i++)
+       for (i = 0; i < 16; i++)
                *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
 
        ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
@@ -2633,7 +2662,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
         * those side effects need to be explicitly handled for both success
         * and shutdown.
         */
-       return X86EMUL_CONTINUE;
+       return emulator_recalc_and_set_mode(ctxt);
 
 emulate_shutdown:
        ctxt->ops->triple_fault(ctxt);
@@ -2876,6 +2905,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
        ctxt->_eip = rdx;
+       ctxt->mode = usermode;
        *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
 
        return X86EMUL_CONTINUE;
@@ -3469,7 +3499,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+       rc = assign_eip_far(ctxt, ctxt->src.val);
        if (rc != X86EMUL_CONTINUE)
                goto fail;
 
@@ -3611,11 +3641,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
 
 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
 {
-       if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+       int cr_num = ctxt->modrm_reg;
+       int r;
+
+       if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
                return emulate_gp(ctxt, 0);
 
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
+
+       if (cr_num == 0) {
+               /*
+                * CR0 write might have updated CR0.PE and/or CR0.PG
+                * which can affect the cpu's execution mode.
+                */
+               r = emulator_recalc_and_set_mode(ctxt);
+               if (r != X86EMUL_CONTINUE)
+                       return r;
+       }
+
        return X86EMUL_CONTINUE;
 }
 
index 9dba04b..65f092e 100644 (file)
@@ -8263,6 +8263,11 @@ static __init int hardware_setup(void)
        if (!cpu_has_virtual_nmis())
                enable_vnmi = 0;
 
+#ifdef CONFIG_X86_SGX_KVM
+       if (!cpu_has_vmx_encls_vmexit())
+               enable_sgx = false;
+#endif
+
        /*
         * set_apic_access_page_addr() is used to reload apic access
         * page upon invalidation.  No need to do anything if not
index 4bd5f8a..521b433 100644 (file)
@@ -2315,11 +2315,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
 
        /* we verify if the enable bit is set... */
        if (system_time & 1) {
-               kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
-                                         KVM_HOST_USES_PFN, system_time & ~1ULL,
-                                         sizeof(struct pvclock_vcpu_time_info));
+               kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
+                                KVM_HOST_USES_PFN, system_time & ~1ULL,
+                                sizeof(struct pvclock_vcpu_time_info));
        } else {
-               kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
+               kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
        }
 
        return;
@@ -3388,7 +3388,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
+       kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
        vcpu->arch.time = 0;
 }
 
@@ -6442,26 +6442,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
        return 0;
 }
 
-static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
+                                      struct kvm_msr_filter *filter)
 {
-       struct kvm_msr_filter __user *user_msr_filter = argp;
        struct kvm_x86_msr_filter *new_filter, *old_filter;
-       struct kvm_msr_filter filter;
        bool default_allow;
        bool empty = true;
        int r = 0;
        u32 i;
 
-       if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
-               return -EFAULT;
-
-       if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+       if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
                return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
-               empty &= !filter.ranges[i].nmsrs;
+       for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
+               empty &= !filter->ranges[i].nmsrs;
 
-       default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
+       default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
        if (empty && !default_allow)
                return -EINVAL;
 
@@ -6469,8 +6465,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        if (!new_filter)
                return -ENOMEM;
 
-       for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-               r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+       for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
+               r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
                if (r) {
                        kvm_free_msr_filter(new_filter);
                        return r;
@@ -6493,6 +6489,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        return 0;
 }
 
+#ifdef CONFIG_KVM_COMPAT
+/* for KVM_X86_SET_MSR_FILTER */
+struct kvm_msr_filter_range_compat {
+       __u32 flags;
+       __u32 nmsrs;
+       __u32 base;
+       __u32 bitmap;
+};
+
+struct kvm_msr_filter_compat {
+       __u32 flags;
+       struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
+};
+
+#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
+
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                             unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+       struct kvm *kvm = filp->private_data;
+       long r = -ENOTTY;
+
+       switch (ioctl) {
+       case KVM_X86_SET_MSR_FILTER_COMPAT: {
+               struct kvm_msr_filter __user *user_msr_filter = argp;
+               struct kvm_msr_filter_compat filter_compat;
+               struct kvm_msr_filter filter;
+               int i;
+
+               if (copy_from_user(&filter_compat, user_msr_filter,
+                                  sizeof(filter_compat)))
+                       return -EFAULT;
+
+               filter.flags = filter_compat.flags;
+               for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+                       struct kvm_msr_filter_range_compat *cr;
+
+                       cr = &filter_compat.ranges[i];
+                       filter.ranges[i] = (struct kvm_msr_filter_range) {
+                               .flags = cr->flags,
+                               .nmsrs = cr->nmsrs,
+                               .base = cr->base,
+                               .bitmap = (__u8 *)(ulong)cr->bitmap,
+                       };
+               }
+
+               r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
+               break;
+       }
+       }
+
+       return r;
+}
+#endif
+
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 static int kvm_arch_suspend_notifier(struct kvm *kvm)
 {
@@ -6915,9 +6967,16 @@ set_pit2_out:
        case KVM_SET_PMU_EVENT_FILTER:
                r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
                break;
-       case KVM_X86_SET_MSR_FILTER:
-               r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
+       case KVM_X86_SET_MSR_FILTER: {
+               struct kvm_msr_filter __user *user_msr_filter = argp;
+               struct kvm_msr_filter filter;
+
+               if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+                       return -EFAULT;
+
+               r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
                break;
+       }
        default:
                r = -ENOTTY;
        }
@@ -9985,7 +10044,20 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
            kvm_x86_ops.nested_ops->has_events(vcpu))
                *req_immediate_exit = true;
 
-       WARN_ON(kvm_is_exception_pending(vcpu));
+       /*
+        * KVM must never queue a new exception while injecting an event; KVM
+        * is done emulating and should only propagate the to-be-injected event
+        * to the VMCS/VMCB.  Queueing a new exception can put the vCPU into an
+        * infinite loop as KVM will bail from VM-Enter to inject the pending
+        * exception and start the cycle all over.
+        *
+        * Exempt triple faults as they have special handling and won't put the
+        * vCPU into an infinite loop.  Triple fault can be queued when running
+        * VMX without unrestricted guest, as that requires KVM to emulate Real
+        * Mode events (see kvm_inject_realmode_interrupt()).
+        */
+       WARN_ON_ONCE(vcpu->arch.exception.pending ||
+                    vcpu->arch.exception_vmexit.pending);
        return 0;
 
 out:
@@ -11757,6 +11829,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;
 
+       kvm_gpc_init(&vcpu->arch.pv_time);
+
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
index 93c628d..2dae413 100644 (file)
@@ -42,13 +42,13 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        int idx = srcu_read_lock(&kvm->srcu);
 
        if (gfn == GPA_INVALID) {
-               kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
+               kvm_gpc_deactivate(kvm, gpc);
                goto out;
        }
 
        do {
-               ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
-                                               gpa, PAGE_SIZE);
+               ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
+                                      PAGE_SIZE);
                if (ret)
                        goto out;
 
@@ -554,15 +554,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                             offsetof(struct compat_vcpu_info, time));
 
                if (data->u.gpa == GPA_INVALID) {
-                       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
+                       kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
                        r = 0;
                        break;
                }
 
-               r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
-                                             &vcpu->arch.xen.vcpu_info_cache,
-                                             NULL, KVM_HOST_USES_PFN, data->u.gpa,
-                                             sizeof(struct vcpu_info));
+               r = kvm_gpc_activate(vcpu->kvm,
+                                    &vcpu->arch.xen.vcpu_info_cache, NULL,
+                                    KVM_HOST_USES_PFN, data->u.gpa,
+                                    sizeof(struct vcpu_info));
                if (!r)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
@@ -570,16 +570,16 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
                if (data->u.gpa == GPA_INVALID) {
-                       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
-                                                    &vcpu->arch.xen.vcpu_time_info_cache);
+                       kvm_gpc_deactivate(vcpu->kvm,
+                                          &vcpu->arch.xen.vcpu_time_info_cache);
                        r = 0;
                        break;
                }
 
-               r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
-                                             &vcpu->arch.xen.vcpu_time_info_cache,
-                                             NULL, KVM_HOST_USES_PFN, data->u.gpa,
-                                             sizeof(struct pvclock_vcpu_time_info));
+               r = kvm_gpc_activate(vcpu->kvm,
+                                    &vcpu->arch.xen.vcpu_time_info_cache,
+                                    NULL, KVM_HOST_USES_PFN, data->u.gpa,
+                                    sizeof(struct pvclock_vcpu_time_info));
                if (!r)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                break;
@@ -590,16 +590,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        break;
                }
                if (data->u.gpa == GPA_INVALID) {
-                       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
-                                                    &vcpu->arch.xen.runstate_cache);
+                       kvm_gpc_deactivate(vcpu->kvm,
+                                          &vcpu->arch.xen.runstate_cache);
                        r = 0;
                        break;
                }
 
-               r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
-                                             &vcpu->arch.xen.runstate_cache,
-                                             NULL, KVM_HOST_USES_PFN, data->u.gpa,
-                                             sizeof(struct vcpu_runstate_info));
+               r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
+                                    NULL, KVM_HOST_USES_PFN, data->u.gpa,
+                                    sizeof(struct vcpu_runstate_info));
                break;
 
        case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
@@ -1667,18 +1666,18 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
        case EVTCHNSTAT_ipi:
                /* IPI  must map back to the same port# */
                if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
-                       goto out; /* -EINVAL */
+                       goto out_noeventfd; /* -EINVAL */
                break;
 
        case EVTCHNSTAT_interdomain:
                if (data->u.evtchn.deliver.port.port) {
                        if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
-                               goto out; /* -EINVAL */
+                               goto out_noeventfd; /* -EINVAL */
                } else {
                        eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
                        if (IS_ERR(eventfd)) {
                                ret = PTR_ERR(eventfd);
-                               goto out;
+                               goto out_noeventfd;
                        }
                }
                break;
@@ -1718,6 +1717,7 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
 out:
        if (eventfd)
                eventfd_ctx_put(eventfd);
+out_noeventfd:
        kfree(evtchnfd);
        return ret;
 }
@@ -1816,7 +1816,12 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
        vcpu->arch.xen.poll_evtchn = 0;
+
        timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
+
+       kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
+       kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
+       kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
 }
 
 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -1824,18 +1829,17 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
        if (kvm_xen_timer_enabled(vcpu))
                kvm_xen_stop_timer(vcpu);
 
-       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
-                                    &vcpu->arch.xen.runstate_cache);
-       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
-                                    &vcpu->arch.xen.vcpu_info_cache);
-       kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
-                                    &vcpu->arch.xen.vcpu_time_info_cache);
+       kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
+       kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
+       kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);
+
        del_timer_sync(&vcpu->arch.xen.poll_timer);
 }
 
 void kvm_xen_init_vm(struct kvm *kvm)
 {
        idr_init(&kvm->arch.xen.evtchn_ports);
+       kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
 }
 
 void kvm_xen_destroy_vm(struct kvm *kvm)
@@ -1843,7 +1847,7 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
        struct evtchnfd *evtchnfd;
        int i;
 
-       kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);
+       kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);
 
        idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
                if (!evtchnfd->deliver.port.port)
index 97342c4..2e5a045 100644 (file)
@@ -587,6 +587,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
 {
        unsigned long end;
 
+       /* Kernel text is rw at boot up */
+       if (system_state == SYSTEM_BOOTING)
+               return new;
+
        /*
         * 32-bit has some unfixable W+X issues, like EFI code
         * and writeable data being in the same page.  Disable
index 9962042..00127ab 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/bpf.h>
 #include <linux/memory.h>
 #include <linux/sort.h>
+#include <linux/init.h>
 #include <asm/extable.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
@@ -388,6 +389,18 @@ out:
        return ret;
 }
 
+int __init bpf_arch_init_dispatcher_early(void *ip)
+{
+       const u8 *nop_insn = x86_nops[5];
+
+       if (is_endbr(*(u32 *)ip))
+               ip += ENDBR_INSN_SIZE;
+
+       if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
+               text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
+       return 0;
+}
+
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
 {
index 58a200d..17f09dc 100644 (file)
@@ -26,6 +26,7 @@ GCOV_PROFILE  := n
 KASAN_SANITIZE := n
 UBSAN_SANITIZE := n
 KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
 # These are adjustments to the compiler flags used for objects that
index 64ee618..71f7216 100644 (file)
@@ -369,12 +369,8 @@ struct bfq_queue {
        unsigned long split_time; /* time of last split */
 
        unsigned long first_IO_time; /* time of first I/O for this queue */
-
        unsigned long creation_time; /* when this queue is created */
 
-       /* max service rate measured so far */
-       u32 max_service_rate;
-
        /*
         * Pointer to the waker queue for this queue, i.e., to the
         * queue Q such that this queue happens to get new I/O right
index 633a902..57c2f32 100644 (file)
@@ -741,7 +741,7 @@ void bio_put(struct bio *bio)
                        return;
        }
 
-       if (bio->bi_opf & REQ_ALLOC_CACHE) {
+       if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
                struct bio_alloc_cache *cache;
 
                bio_uninit(bio);
index 8070b6c..75c8296 100644 (file)
@@ -611,6 +611,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                .nr_tags        = 1,
        };
        u64 alloc_time_ns = 0;
+       struct request *rq;
        unsigned int cpu;
        unsigned int tag;
        int ret;
@@ -660,8 +661,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        tag = blk_mq_get_tag(&data);
        if (tag == BLK_MQ_NO_TAG)
                goto out_queue_exit;
-       return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
+       rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
                                        alloc_time_ns);
+       rq->__data_len = 0;
+       rq->__sector = (sector_t) -1;
+       rq->bio = rq->biotail = NULL;
+       return rq;
 
 out_queue_exit:
        blk_queue_exit(q);
@@ -3112,8 +3117,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
        struct page *page;
        unsigned long flags;
 
-       /* There is no need to clear a driver tags own mapping */
-       if (drv_tags == tags)
+       /*
+        * There is no need to clear mapping if driver tags is not initialized
+        * or the mapping belongs to the driver tags.
+        */
+       if (!drv_tags || drv_tags == tags)
                return;
 
        list_for_each_entry(page, &tags->page_list, lru) {
index 17b33c6..fee90eb 100644 (file)
@@ -410,9 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
         * Otherwise just allocate the device numbers for both the whole device
         * and all partitions from the extended dev_t space.
         */
+       ret = -EINVAL;
        if (disk->major) {
                if (WARN_ON(!disk->minors))
-                       return -EINVAL;
+                       goto out_exit_elevator;
 
                if (disk->minors > DISK_MAX_PARTS) {
                        pr_err("block: can't allocate more than %d partitions\n",
@@ -420,14 +421,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
                        disk->minors = DISK_MAX_PARTS;
                }
                if (disk->first_minor + disk->minors > MINORMASK + 1)
-                       return -EINVAL;
+                       goto out_exit_elevator;
        } else {
                if (WARN_ON(disk->minors))
-                       return -EINVAL;
+                       goto out_exit_elevator;
 
                ret = blk_alloc_ext_minor();
                if (ret < 0)
-                       return ret;
+                       goto out_exit_elevator;
                disk->major = BLOCK_EXT_MAJOR;
                disk->first_minor = ret;
        }
@@ -540,6 +541,9 @@ out_device_del:
 out_free_ext_minor:
        if (disk->major == BLOCK_EXT_MAJOR)
                blk_free_ext_minor(disk->first_minor);
+out_exit_elevator:
+       if (disk->queue->elevator)
+               elevator_exit(disk->queue);
        return ret;
 }
 EXPORT_SYMBOL(device_add_disk);
index 72f1fb7..e648158 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
 #include <linux/ras.h>
+#include <acpi/ghes.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 
@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        int     cpu = mce->extcpu;
        struct acpi_hest_generic_status *estatus, *tmp;
        struct acpi_hest_generic_data *gdata;
-       const guid_t *fru_id = &guid_null;
-       char *fru_text = "";
+       const guid_t *fru_id;
+       char *fru_text;
        guid_t *sec_type;
        static u32 err_seq;
 
@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 
        /* log event via trace */
        err_seq++;
-       gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-       if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-               fru_id = (guid_t *)gdata->fru_id;
-       if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-               fru_text = gdata->fru_text;
-       sec_type = (guid_t *)gdata->section_type;
-       if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-               struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-               if (gdata->error_data_length >= sizeof(*mem))
-                       trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-                                              (u8)gdata->error_severity);
+       apei_estatus_for_each_section(tmp, gdata) {
+               if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+                       fru_id = (guid_t *)gdata->fru_id;
+               else
+                       fru_id = &guid_null;
+               if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+                       fru_text = gdata->fru_text;
+               else
+                       fru_text = "";
+               sec_type = (guid_t *)gdata->section_type;
+               if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+                       struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+                       if (gdata->error_data_length >= sizeof(*mem))
+                               trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+                                                      (u8)gdata->error_severity);
+               }
        }
 
 out:
index ee4ce5b..3e252be 100644 (file)
@@ -27,7 +27,7 @@
  * Arbitrary retries in case the remote processor is slow to respond
  * to PCC commands
  */
-#define PCC_CMD_WAIT_RETRIES_NUM       500
+#define PCC_CMD_WAIT_RETRIES_NUM       500ULL
 
 struct pcc_data {
        struct pcc_mbox_chan *pcc_chan;
index 80ad530..9952f3a 100644 (file)
@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
        clear_fixmap(fixmap_idx);
 }
 
-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
 {
        unsigned long addr, len;
        int rc;
index ca2aed8..8059baf 100644 (file)
@@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev,
                        struct iommu_resv_region *region;
 
                        region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
-                                                        prot, IOMMU_RESV_MSI);
+                                                        prot, IOMMU_RESV_MSI,
+                                                        GFP_KERNEL);
                        if (region)
                                list_add_tail(&region->list, head);
                }
index c8385ef..4e3db20 100644 (file)
@@ -323,6 +323,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
 
        list_for_each_entry(pn, &adev->physical_node_list, node) {
                if (dev_is_pci(pn->dev)) {
+                       get_device(pn->dev);
                        pci_dev = to_pci_dev(pn->dev);
                        break;
                }
index 6f9489e..f27914a 100644 (file)
@@ -425,6 +425,24 @@ static const struct dmi_system_id asus_laptop[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
                },
        },
+       {
+               .ident = "Asus Vivobook S5602ZA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+               },
+       },
+       { }
+};
+
+static const struct dmi_system_id lenovo_82ra[] = {
+       {
+               .ident = "LENOVO IdeaPad Flex 5 16ALC7",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
+               },
+       },
        { }
 };
 
@@ -434,11 +452,14 @@ struct irq_override_cmp {
        unsigned char triggering;
        unsigned char polarity;
        unsigned char shareable;
+       bool override;
 };
 
-static const struct irq_override_cmp skip_override_table[] = {
-       { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
-       { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+static const struct irq_override_cmp override_table[] = {
+       { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+       { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+       { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+       { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
 };
 
 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -446,6 +467,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
 {
        int i;
 
+       for (i = 0; i < ARRAY_SIZE(override_table); i++) {
+               const struct irq_override_cmp *entry = &override_table[i];
+
+               if (dmi_check_system(entry->system) &&
+                   entry->irq == gsi &&
+                   entry->triggering == triggering &&
+                   entry->polarity == polarity &&
+                   entry->shareable == shareable)
+                       return entry->override;
+       }
+
 #ifdef CONFIG_X86
        /*
         * IRQ override isn't needed on modern AMD Zen systems and
@@ -456,17 +488,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
                return false;
 #endif
 
-       for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
-               const struct irq_override_cmp *entry = &skip_override_table[i];
-
-               if (dmi_check_system(entry->system) &&
-                   entry->irq == gsi &&
-                   entry->triggering == triggering &&
-                   entry->polarity == polarity &&
-                   entry->shareable == shareable)
-                       return false;
-       }
-
        return true;
 }
 
@@ -498,8 +519,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
                u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
                if (triggering != trig || polarity != pol) {
-                       pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,
-                               t ? "level" : "edge", p ? "low" : "high");
+                       pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
+                               t ? "level" : "edge",
+                               trig == triggering ? "" : "(!)",
+                               p ? "low" : "high",
+                               pol == polarity ? "" : "(!)");
                        triggering = trig;
                        polarity = pol;
                }
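
With the per-entry override flag, a DMI quirk can now either force or suppress the DSDT IRQ override rather than only suppressing it. A hypothetical entry for an imaginary board might look like the following (the vendor string, board name and IRQ number are made up for illustration):

static const struct dmi_system_id example_board[] = {
	{
		.ident = "Example Vendor EXAMPLE123",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE123"),
		},
	},
	{ }
};

/* corresponding override_table[] entry; 'true' forces the override,
 * 'false' suppresses it: */
/* { example_board, 7, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, */
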
index 558664d..b47e93a 100644 (file)
@@ -789,6 +789,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
 static const char * const acpi_ignore_dep_ids[] = {
        "PNP0D80", /* Windows-compatible System Power Management Controller */
        "INT33BD", /* Intel Baytrail Mailbox Device */
+       "LATT2021", /* Lattice FW Update Client Driver */
        NULL
 };
 
@@ -1509,9 +1510,12 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
                        goto out;
                }
 
+               *map = r;
+
                list_for_each_entry(rentry, &list, node) {
                        if (rentry->res->start >= rentry->res->end) {
-                               kfree(r);
+                               kfree(*map);
+                               *map = NULL;
                                ret = -EINVAL;
                                dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
                                goto out;
@@ -1523,8 +1527,6 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
                        r->offset = rentry->offset;
                        r++;
                }
-
-               *map = r;
        }
  out:
        acpi_dev_free_resource_list(&list);
index 0d9064a..9cd8797 100644 (file)
@@ -668,6 +668,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        { },
 };
 
+static bool google_cros_ec_present(void)
+{
+       return acpi_dev_found("GOOG0004");
+}
+
 /*
  * Determine which type of backlight interface to use on this system,
  * First check cmdline, then dmi quirks, then do autodetect.
@@ -730,6 +735,13 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
                        return acpi_backlight_video;
        }
 
+       /*
+        * Chromebooks that don't have a backlight handler in the ACPI tables
+        * are supposed to use the native backlight if it is available.

+        */
+       if (google_cros_ec_present() && native_available)
+               return acpi_backlight_native;
+
        /* No ACPI video (old hw), use vendor specific fw methods. */
        return acpi_backlight_vendor;
 }
index da7ee8b..7add8e7 100644 (file)
@@ -257,7 +257,7 @@ enum {
        PCS_7                           = 0x94, /* 7+ port PCS (Denverton) */
 
        /* em constants */
-       EM_MAX_SLOTS                    = 8,
+       EM_MAX_SLOTS                    = SATA_PMP_MAX_PORTS,
        EM_MAX_RETRY                    = 5,
 
        /* em_ctl bits */
index f61795c..6f216eb 100644 (file)
@@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (!of_id)
                return -ENODEV;
 
-       priv->version = (enum brcm_ahci_version)of_id->data;
+       priv->version = (unsigned long)of_id->data;
        priv->dev = dev;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
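
The same one-line cast fix recurs in the ahci_imx, ahci_qoriq, ahci_xgene and sata_rcar hunks below: the OF match data is a pointer-sized cookie, so narrowing it through unsigned long, rather than casting straight to the enum type, avoids the void-pointer-to-enum cast warnings newer compilers emit. A minimal sketch of the pattern with made-up names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum example_version { EXAMPLE_V1 = 1, EXAMPLE_V2 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = (void *)EXAMPLE_V1 },
	{ .compatible = "vendor,example-v2", .data = (void *)EXAMPLE_V2 },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	enum example_version ver;

	/* cast via unsigned long, not via the enum type directly */
	ver = (unsigned long)of_device_get_match_data(&pdev->dev);

	return ver ? 0 : -ENODEV;
}
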
index b734e06..a950767 100644 (file)
@@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        imxpriv->ahci_pdev = pdev;
        imxpriv->no_device = false;
        imxpriv->first_time = true;
-       imxpriv->type = (enum ahci_imx_type)of_id->data;
+       imxpriv->type = (unsigned long)of_id->data;
 
        imxpriv->sata_clk = devm_clk_get(dev, "sata");
        if (IS_ERR(imxpriv->sata_clk)) {
@@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver);
 MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
 MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
index 6cd6184..9cf9bf3 100644 (file)
@@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        if (of_id)
-               qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+               qoriq_priv->type = (unsigned long)of_id->data;
        else
                qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;
 
index 5a2cac6..8607b68 100644 (file)
@@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = {
        .driver = {
                .name = DRV_NAME,
                .pm = &st_ahci_pm_ops,
-               .of_match_table = of_match_ptr(st_ahci_match),
+               .of_match_table = st_ahci_match,
        },
        .probe = st_ahci_probe,
        .remove = ata_platform_remove_one,
index 7bb5db1..1e08704 100644 (file)
@@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
        of_devid = of_match_device(xgene_ahci_of_match, dev);
        if (of_devid) {
                if (of_devid->data)
-                       version = (enum xgene_ahci_version) of_devid->data;
+                       version = (unsigned long) of_devid->data;
        }
 #ifdef CONFIG_ACPI
        else {
index 590ebea..0195eb2 100644 (file)
@@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
+       priv->type = (unsigned long)of_device_get_match_data(dev);
 
        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
index ead135c..6471b55 100644 (file)
@@ -2952,6 +2952,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
                np = it.node;
                if (!of_match_node(idle_state_match, np))
                        continue;
+
+               if (!of_device_is_available(np))
+                       continue;
+
                if (states) {
                        ret = genpd_parse_state(&states[i], np);
                        if (ret) {
index 4d6278a..2a5a37f 100644 (file)
@@ -229,7 +229,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
  * Find a given string in a string array and if it is found return the
  * index back.
  *
- * Return: %0 if the property was found (success),
+ * Return: index, starting from %0, if the property was found (success),
  *        %-EINVAL if given arguments are not valid,
  *        %-ENODATA if the property does not have a value,
  *        %-EPROTO if the property is not an array of strings,
@@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
  * Find a given string in a string array and if it is found return the
  * index back.
  *
- * Return: %0 if the property was found (success),
+ * Return: index, starting from %0, if the property was found (success),
  *        %-EINVAL if given arguments are not valid,
  *        %-ENODATA if the property does not have a value,
  *        %-EPROTO if the property is not an array of strings,
index 8f7f144..7f9bcc8 100644 (file)
@@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
                return NULL;
        memset(req, 0, sizeof(*req));
 
-       req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
-                                          GFP_NOIO, &drbd_io_bio_set);
-       req->private_bio->bi_private = req;
-       req->private_bio->bi_end_io = drbd_request_endio;
-
        req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
                      | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
                      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
@@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio)
        /* Update disk stats */
        req->start_jif = bio_start_io_acct(req->master_bio);
 
-       if (!get_ldev(device)) {
-               bio_put(req->private_bio);
-               req->private_bio = NULL;
+       if (get_ldev(device)) {
+               req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+                                                  bio, GFP_NOIO,
+                                                  &drbd_io_bio_set);
+               req->private_bio->bi_private = req;
+               req->private_bio->bi_end_io = drbd_request_endio;
        }
 
        /* process discards always from our submitter thread */
index f9e3930..04453f4 100644 (file)
@@ -7222,8 +7222,10 @@ static int __init rbd_sysfs_init(void)
        int ret;
 
        ret = device_register(&rbd_root_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               put_device(&rbd_root_dev);
                return ret;
+       }
 
        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
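
The rbd fix applies the documented device_register() rule: once registration has been attempted, the initial reference must be dropped with put_device() even on failure, never by freeing the device directly, so the release callback performs the teardown. A generic sketch of that pattern with a hypothetical static root device:

#include <linux/device.h>
#include <linux/init.h>

static void example_root_release(struct device *dev)
{
	/* nothing to free here; the device is static */
}

static struct device example_root_dev = {
	.init_name = "example_root",
	.release = example_root_release,
};

static int __init example_init(void)
{
	int ret = device_register(&example_root_dev);

	if (ret) {
		/* drop the reference taken by device_register() */
		put_device(&example_root_dev);
		return ret;
	}
	return 0;
}
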
index 2651bf4..5afce6f 100644 (file)
@@ -124,7 +124,7 @@ struct ublk_queue {
        bool force_abort;
        unsigned short nr_io_ready;     /* how many ios setup */
        struct ublk_device *dev;
-       struct ublk_io ios[0];
+       struct ublk_io ios[];
 };
 
 #define UBLK_DAEMON_MONITOR_PERIOD     (5 * HZ)
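
Replacing the zero-length array with a C99 flexible array member lets the compiler and the kernel's bounds-checking helpers see the real array extent; the allocation side then typically sizes the object with struct_size(). A short sketch with hypothetical types (not ublk's actual allocation path):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_io {
	void *cmd;
	unsigned int flags;
};

struct example_queue {
	unsigned short nr_io;
	struct example_io ios[];	/* flexible array member */
};

static struct example_queue *example_queue_alloc(unsigned short nr_io)
{
	struct example_queue *q;

	/* struct_size() = header plus nr_io trailing elements, overflow-checked */
	q = kzalloc(struct_size(q, ios, nr_io), GFP_KERNEL);
	if (q)
		q->nr_io = nr_io;
	return q;
}
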
index e7dd457..e98fcac 100644 (file)
@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
        while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
                if (!wait)
                        return 0;
-               cpu_relax();
+               hwrng_msleep(rng, 1000);
        }
 
        num_words = rng_readl(priv, RNG_STATUS) >> 24;
index 2fe28ee..6975415 100644 (file)
@@ -791,13 +791,13 @@ void __init random_init_early(const char *command_line)
 #endif
 
        for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
-               longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
+               longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
                if (longs) {
                        _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
                        i += longs;
                        continue;
                }
-               longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
+               longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
                if (longs) {
                        _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
                        i += longs;
index 77a863b..deed4af 100644 (file)
@@ -232,34 +232,45 @@ static const enum counter_function quad8_count_functions_list[] = {
        COUNTER_FUNCTION_QUADRATURE_X4,
 };
 
+static int quad8_function_get(const struct quad8 *const priv, const size_t id,
+                             enum counter_function *const function)
+{
+       if (!priv->quadrature_mode[id]) {
+               *function = COUNTER_FUNCTION_PULSE_DIRECTION;
+               return 0;
+       }
+
+       switch (priv->quadrature_scale[id]) {
+       case 0:
+               *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
+               return 0;
+       case 1:
+               *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
+               return 0;
+       case 2:
+               *function = COUNTER_FUNCTION_QUADRATURE_X4;
+               return 0;
+       default:
+               /* should never reach this path */
+               return -EINVAL;
+       }
+}
+
 static int quad8_function_read(struct counter_device *counter,
                               struct counter_count *count,
                               enum counter_function *function)
 {
        struct quad8 *const priv = counter_priv(counter);
-       const int id = count->id;
        unsigned long irqflags;
+       int retval;
 
        spin_lock_irqsave(&priv->lock, irqflags);
 
-       if (priv->quadrature_mode[id])
-               switch (priv->quadrature_scale[id]) {
-               case 0:
-                       *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
-                       break;
-               case 1:
-                       *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
-                       break;
-               case 2:
-                       *function = COUNTER_FUNCTION_QUADRATURE_X4;
-                       break;
-               }
-       else
-               *function = COUNTER_FUNCTION_PULSE_DIRECTION;
+       retval = quad8_function_get(priv, count->id, function);
 
        spin_unlock_irqrestore(&priv->lock, irqflags);
 
-       return 0;
+       return retval;
 }
 
 static int quad8_function_write(struct counter_device *counter,
@@ -359,6 +370,7 @@ static int quad8_action_read(struct counter_device *counter,
                             enum counter_synapse_action *action)
 {
        struct quad8 *const priv = counter_priv(counter);
+       unsigned long irqflags;
        int err;
        enum counter_function function;
        const size_t signal_a_id = count->synapses[0].signal->id;
@@ -374,9 +386,21 @@ static int quad8_action_read(struct counter_device *counter,
                return 0;
        }
 
-       err = quad8_function_read(counter, count, &function);
-       if (err)
+       spin_lock_irqsave(&priv->lock, irqflags);
+
+       /* Get Count function and direction atomically */
+       err = quad8_function_get(priv, count->id, &function);
+       if (err) {
+               spin_unlock_irqrestore(&priv->lock, irqflags);
+               return err;
+       }
+       err = quad8_direction_read(counter, count, &direction);
+       if (err) {
+               spin_unlock_irqrestore(&priv->lock, irqflags);
                return err;
+       }
+
+       spin_unlock_irqrestore(&priv->lock, irqflags);
 
        /* Default action mode */
        *action = COUNTER_SYNAPSE_ACTION_NONE;
@@ -389,10 +413,6 @@ static int quad8_action_read(struct counter_device *counter,
                return 0;
        case COUNTER_FUNCTION_QUADRATURE_X1_A:
                if (synapse->signal->id == signal_a_id) {
-                       err = quad8_direction_read(counter, count, &direction);
-                       if (err)
-                               return err;
-
                        if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
                                *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
                        else
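
The 104-quad-8 rework above factors the raw register decode into a lock-free helper (quad8_function_get()) so callers choose the locking scope themselves, which lets action_read() fetch the count function and direction inside a single spin_lock_irqsave() section instead of taking the lock twice. The shape of that pattern, with hypothetical names and state:

#include <linux/spinlock.h>
#include <linux/types.h>

enum example_mode { EXAMPLE_MODE_A, EXAMPLE_MODE_B };

struct example_priv {
	spinlock_t lock;
	unsigned int raw_mode[4];
	unsigned int direction[4];
};

/* decode-only helper: the caller is expected to hold priv->lock */
static int example_state_get(const struct example_priv *priv, size_t id,
			     enum example_mode *mode)
{
	*mode = priv->raw_mode[id] ? EXAMPLE_MODE_B : EXAMPLE_MODE_A;
	return 0;
}

/* fetch mode and direction atomically, in one critical section */
static int example_state_read(struct example_priv *priv, size_t id,
			      enum example_mode *mode, unsigned int *direction)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	ret = example_state_get(priv, id, mode);
	if (!ret)
		*direction = priv->direction[id];
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
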
index f9dee15..e2d1dc6 100644 (file)
@@ -28,7 +28,6 @@ struct mchp_tc_data {
        int qdec_mode;
        int num_channels;
        int channel[2];
-       bool trig_inverted;
 };
 
 static const enum counter_function mchp_tc_count_functions[] = {
@@ -153,7 +152,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,
 
        regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
 
-       if (priv->trig_inverted)
+       if (signal->id == 1)
                sigstatus = (sr & ATMEL_TC_MTIOB);
        else
                sigstatus = (sr & ATMEL_TC_MTIOA);
@@ -171,6 +170,17 @@ static int mchp_tc_count_action_read(struct counter_device *counter,
        struct mchp_tc_data *const priv = counter_priv(counter);
        u32 cmr;
 
+       if (priv->qdec_mode) {
+               *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+               return 0;
+       }
+
+       /* Only TIOA signal is evaluated in non-QDEC mode */
+       if (synapse->signal->id != 0) {
+               *action = COUNTER_SYNAPSE_ACTION_NONE;
+               return 0;
+       }
+
        regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
 
        switch (cmr & ATMEL_TC_ETRGEDG) {
@@ -199,8 +209,8 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
        struct mchp_tc_data *const priv = counter_priv(counter);
        u32 edge = ATMEL_TC_ETRGEDG_NONE;
 
-       /* QDEC mode is rising edge only */
-       if (priv->qdec_mode)
+       /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */
+       if (priv->qdec_mode || synapse->signal->id != 0)
                return -EINVAL;
 
        switch (action) {
index af10de3..fb1cb17 100644 (file)
@@ -377,7 +377,8 @@ static const enum counter_signal_polarity ecap_cnt_pol_avail[] = {
        COUNTER_SIGNAL_POLARITY_NEGATIVE,
 };
 
-static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_avail, ECAP_NB_CEVT);
+static DEFINE_COUNTER_AVAILABLE(ecap_cnt_pol_available, ecap_cnt_pol_avail);
+static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_available, ECAP_NB_CEVT);
 
 static struct counter_comp ecap_cnt_signal_ext[] = {
        COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array),
@@ -479,8 +480,8 @@ static int ecap_cnt_probe(struct platform_device *pdev)
        int ret;
 
        counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
-       if (IS_ERR(counter_dev))
-               return PTR_ERR(counter_dev);
+       if (!counter_dev)
+               return -ENOMEM;
 
        counter_dev->name = ECAP_DRV_NAME;
        counter_dev->parent = dev;
index d69d13a..4aec4b2 100644 (file)
@@ -222,10 +222,8 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
        if (reg_name[0]) {
                priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
                if (priv->opp_token < 0) {
-                       ret = priv->opp_token;
-                       if (ret != -EPROBE_DEFER)
-                               dev_err(cpu_dev, "failed to set regulators: %d\n",
-                                       ret);
+                       ret = dev_err_probe(cpu_dev, priv->opp_token,
+                                           "failed to set regulators\n");
                        goto free_cpumask;
                }
        }
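
This conversion, repeated in the imx6q, qcom and sun50i hunks below, leans on the dev_err_probe() helper: it returns the error code unchanged, stays quiet for -EPROBE_DEFER (while recording the deferral reason), and prints for real errors, replacing the open-coded "if (ret != -EPROBE_DEFER) dev_err(...)" pattern. A minimal sketch with a hypothetical clock lookup:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "example");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get example clock\n");

	return 0;
}
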
index 90beb26..ad4ce84 100644 (file)
@@ -396,9 +396,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                ret = imx6q_opp_check_speed_grading(cpu_dev);
        }
        if (ret) {
-               if (ret != -EPROBE_DEFER)
-                       dev_err(cpu_dev, "failed to read ocotp: %d\n",
-                               ret);
+               dev_err_probe(cpu_dev, ret, "failed to read ocotp\n");
                goto out_free_opp;
        }
 
index fc3ebeb..6ff73c3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pm_qos.h>
 #include <trace/events/power.h>
 
+#include <asm/cpu.h>
 #include <asm/div64.h>
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>
@@ -280,10 +281,10 @@ static struct cpudata **all_cpu_data;
  * structure is used to store those callbacks.
  */
 struct pstate_funcs {
-       int (*get_max)(void);
-       int (*get_max_physical)(void);
-       int (*get_min)(void);
-       int (*get_turbo)(void);
+       int (*get_max)(int cpu);
+       int (*get_max_physical)(int cpu);
+       int (*get_min)(int cpu);
+       int (*get_turbo)(int cpu);
        int (*get_scaling)(void);
        int (*get_cpu_scaling)(int cpu);
        int (*get_aperf_mperf_shift)(void);
@@ -398,16 +399,6 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
 
        return cppc_perf.nominal_perf;
 }
-
-static u32 intel_pstate_cppc_nominal(int cpu)
-{
-       u64 nominal_perf;
-
-       if (cppc_get_nominal_perf(cpu, &nominal_perf))
-               return 0;
-
-       return nominal_perf;
-}
 #else /* CONFIG_ACPI_CPPC_LIB */
 static inline void intel_pstate_set_itmt_prio(int cpu)
 {
@@ -531,35 +522,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 {
        int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
-       int perf_ctl_turbo = pstate_funcs.get_turbo();
-       int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
+       int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
        int scaling = cpu->pstate.scaling;
 
        pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
-       pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
        pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
        pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
        pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
        pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
        pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
 
-       /*
-        * If the product of the HWP performance scaling factor and the HWP_CAP
-        * highest performance is greater than the maximum turbo frequency
-        * corresponding to the pstate_funcs.get_turbo() return value, the
-        * scaling factor is too high, so recompute it to make the HWP_CAP
-        * highest performance correspond to the maximum turbo frequency.
-        */
-       cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
-       if (turbo_freq < cpu->pstate.turbo_freq) {
-               cpu->pstate.turbo_freq = turbo_freq;
-               scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
-               cpu->pstate.scaling = scaling;
-
-               pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
-                        cpu->cpu, scaling);
-       }
-
+       cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
+                                          perf_ctl_scaling);
        cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
                                         perf_ctl_scaling);
 
@@ -1740,7 +1714,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
        intel_pstate_update_epp_defaults(cpudata);
 }
 
-static int atom_get_min_pstate(void)
+static int atom_get_min_pstate(int not_used)
 {
        u64 value;
 
@@ -1748,7 +1722,7 @@ static int atom_get_min_pstate(void)
        return (value >> 8) & 0x7F;
 }
 
-static int atom_get_max_pstate(void)
+static int atom_get_max_pstate(int not_used)
 {
        u64 value;
 
@@ -1756,7 +1730,7 @@ static int atom_get_max_pstate(void)
        return (value >> 16) & 0x7F;
 }
 
-static int atom_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(int not_used)
 {
        u64 value;
 
@@ -1834,23 +1808,23 @@ static void atom_get_vid(struct cpudata *cpudata)
        cpudata->vid.turbo = value & 0x7f;
 }
 
-static int core_get_min_pstate(void)
+static int core_get_min_pstate(int cpu)
 {
        u64 value;
 
-       rdmsrl(MSR_PLATFORM_INFO, value);
+       rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
        return (value >> 40) & 0xFF;
 }
 
-static int core_get_max_pstate_physical(void)
+static int core_get_max_pstate_physical(int cpu)
 {
        u64 value;
 
-       rdmsrl(MSR_PLATFORM_INFO, value);
+       rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
        return (value >> 8) & 0xFF;
 }
 
-static int core_get_tdp_ratio(u64 plat_info)
+static int core_get_tdp_ratio(int cpu, u64 plat_info)
 {
        /* Check how many TDP levels present */
        if (plat_info & 0x600000000) {
@@ -1860,13 +1834,13 @@ static int core_get_tdp_ratio(u64 plat_info)
                int err;
 
                /* Get the TDP level (0, 1, 2) to get ratios */
-               err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+               err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
                if (err)
                        return err;
 
                /* TDP MSR are continuous starting at 0x648 */
                tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
-               err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+               err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
                if (err)
                        return err;
 
@@ -1883,7 +1857,7 @@ static int core_get_tdp_ratio(u64 plat_info)
        return -ENXIO;
 }
 
-static int core_get_max_pstate(void)
+static int core_get_max_pstate(int cpu)
 {
        u64 tar;
        u64 plat_info;
@@ -1891,10 +1865,10 @@ static int core_get_max_pstate(void)
        int tdp_ratio;
        int err;
 
-       rdmsrl(MSR_PLATFORM_INFO, plat_info);
+       rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
        max_pstate = (plat_info >> 8) & 0xFF;
 
-       tdp_ratio = core_get_tdp_ratio(plat_info);
+       tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
        if (tdp_ratio <= 0)
                return max_pstate;
 
@@ -1903,7 +1877,7 @@ static int core_get_max_pstate(void)
                return tdp_ratio;
        }
 
-       err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
+       err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
        if (!err) {
                int tar_levels;
 
@@ -1918,13 +1892,13 @@ static int core_get_max_pstate(void)
        return max_pstate;
 }
 
-static int core_get_turbo_pstate(void)
+static int core_get_turbo_pstate(int cpu)
 {
        u64 value;
        int nont, ret;
 
-       rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
-       nont = core_get_max_pstate();
+       rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+       nont = core_get_max_pstate(cpu);
        ret = (value) & 255;
        if (ret <= nont)
                ret = nont;
@@ -1952,50 +1926,37 @@ static int knl_get_aperf_mperf_shift(void)
        return 10;
 }
 
-static int knl_get_turbo_pstate(void)
+static int knl_get_turbo_pstate(int cpu)
 {
        u64 value;
        int nont, ret;
 
-       rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
-       nont = core_get_max_pstate();
+       rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+       nont = core_get_max_pstate(cpu);
        ret = (((value) >> 8) & 0xFF);
        if (ret <= nont)
                ret = nont;
        return ret;
 }
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-static u32 hybrid_ref_perf;
-
-static int hybrid_get_cpu_scaling(int cpu)
+static void hybrid_get_type(void *data)
 {
-       return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
-                           intel_pstate_cppc_nominal(cpu));
+       u8 *cpu_type = data;
+
+       *cpu_type = get_this_hybrid_cpu_type();
 }
 
-static void intel_pstate_cppc_set_cpu_scaling(void)
+static int hybrid_get_cpu_scaling(int cpu)
 {
-       u32 min_nominal_perf = U32_MAX;
-       int cpu;
+       u8 cpu_type = 0;
 
-       for_each_present_cpu(cpu) {
-               u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
+       smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+       /* P-cores have a smaller perf level-to-frequency scaling factor. */
+       if (cpu_type == 0x40)
+               return 78741;
 
-               if (nominal_perf && nominal_perf < min_nominal_perf)
-                       min_nominal_perf = nominal_perf;
-       }
-
-       if (min_nominal_perf < U32_MAX) {
-               hybrid_ref_perf = min_nominal_perf;
-               pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
-       }
+       return core_get_scaling();
 }
-#else
-static inline void intel_pstate_cppc_set_cpu_scaling(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_LIB */
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
@@ -2025,10 +1986,10 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
-       int perf_ctl_max_phys = pstate_funcs.get_max_physical();
+       int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
        int perf_ctl_scaling = pstate_funcs.get_scaling();
 
-       cpu->pstate.min_pstate = pstate_funcs.get_min();
+       cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
        cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
        cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
 
@@ -2044,8 +2005,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
                }
        } else {
                cpu->pstate.scaling = perf_ctl_scaling;
-               cpu->pstate.max_pstate = pstate_funcs.get_max();
-               cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+               cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
+               cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
        }
 
        if (cpu->pstate.scaling == perf_ctl_scaling) {
@@ -3221,9 +3182,9 @@ static unsigned int force_load __initdata;
 
 static int __init intel_pstate_msrs_not_valid(void)
 {
-       if (!pstate_funcs.get_max() ||
-           !pstate_funcs.get_min() ||
-           !pstate_funcs.get_turbo())
+       if (!pstate_funcs.get_max(0) ||
+           !pstate_funcs.get_min(0) ||
+           !pstate_funcs.get_turbo(0))
                return -ENODEV;
 
        return 0;
@@ -3450,7 +3411,7 @@ static int __init intel_pstate_init(void)
                                default_driver = &intel_pstate;
 
                        if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
-                               intel_pstate_cppc_set_cpu_scaling();
+                               pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
 
                        goto hwp_cpu_matched;
                }
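
The intel_pstate changes route every pstate-limit lookup through the CPU being initialized: rdmsrl_on_cpu()/rdmsrl_safe_on_cpu() perform the MSR access on that CPU via IPI, and the hybrid scaling helper likewise queries the core type on the target CPU with smp_call_function_single(). A minimal sketch of the cross-CPU MSR read, mirroring core_get_min_pstate() above (the helper name is illustrative):

#include <linux/types.h>
#include <asm/msr.h>

static int example_get_min_ratio(int cpu)
{
	u64 value = 0;

	/* read MSR_PLATFORM_INFO on the target CPU, not on the caller's */
	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);

	return (value >> 40) & 0xFF;
}
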
index 863548f..a577586 100644 (file)
@@ -64,7 +64,7 @@ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
 
 static void get_krait_bin_format_a(struct device *cpu_dev,
                                          int *speed, int *pvs, int *pvs_ver,
-                                         struct nvmem_cell *pvs_nvmem, u8 *buf)
+                                         u8 *buf)
 {
        u32 pte_efuse;
 
@@ -95,7 +95,7 @@ static void get_krait_bin_format_a(struct device *cpu_dev,
 
 static void get_krait_bin_format_b(struct device *cpu_dev,
                                          int *speed, int *pvs, int *pvs_ver,
-                                         struct nvmem_cell *pvs_nvmem, u8 *buf)
+                                         u8 *buf)
 {
        u32 pte_efuse, redundant_sel;
 
@@ -213,6 +213,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
        int speed = 0, pvs = 0, pvs_ver = 0;
        u8 *speedbin;
        size_t len;
+       int ret = 0;
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
 
@@ -222,15 +223,16 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
        switch (len) {
        case 4:
                get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
-                                      speedbin_nvmem, speedbin);
+                                      speedbin);
                break;
        case 8:
                get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
-                                      speedbin_nvmem, speedbin);
+                                      speedbin);
                break;
        default:
                dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto len_error;
        }
 
        snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
@@ -238,8 +240,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
 
        drv->versions = (1 << speed);
 
+len_error:
        kfree(speedbin);
-       return 0;
+       return ret;
 }
 
 static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -262,7 +265,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
        struct nvmem_cell *speedbin_nvmem;
        struct device_node *np;
        struct device *cpu_dev;
-       char *pvs_name = "speedXX-pvsXX-vXX";
+       char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+       char *pvs_name = pvs_name_buffer;
        unsigned cpu;
        const struct of_device_id *match;
        int ret;
@@ -295,11 +299,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
        if (drv->data->get_version) {
                speedbin_nvmem = of_nvmem_cell_get(np, NULL);
                if (IS_ERR(speedbin_nvmem)) {
-                       if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
-                               dev_err(cpu_dev,
-                                       "Could not get nvmem cell: %ld\n",
-                                       PTR_ERR(speedbin_nvmem));
-                       ret = PTR_ERR(speedbin_nvmem);
+                       ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+                                           "Could not get nvmem cell\n");
                        goto free_drv;
                }
 
index a492258..1583a37 100644 (file)
@@ -56,12 +56,9 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
 
        speedbin_nvmem = of_nvmem_cell_get(np, NULL);
        of_node_put(np);
-       if (IS_ERR(speedbin_nvmem)) {
-               if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
-                       pr_err("Could not get nvmem cell: %ld\n",
-                              PTR_ERR(speedbin_nvmem));
-               return PTR_ERR(speedbin_nvmem);
-       }
+       if (IS_ERR(speedbin_nvmem))
+               return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+                                    "Could not get nvmem cell\n");
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
        nvmem_cell_put(speedbin_nvmem);
index c2004ca..4596c3e 100644 (file)
@@ -589,6 +589,7 @@ static const struct of_device_id tegra194_cpufreq_of_match[] = {
        { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
 
 static struct platform_driver tegra194_ccplex_driver = {
        .driver = {
index 5b79a4a..6787ed8 100644 (file)
@@ -124,28 +124,6 @@ config EFI_ZBOOT
          is supported by the encapsulated image. (The compression algorithm
          used is described in the zboot image header)
 
-config EFI_ZBOOT_SIGNED
-       def_bool y
-       depends on EFI_ZBOOT_SIGNING_CERT != ""
-       depends on EFI_ZBOOT_SIGNING_KEY != ""
-
-config EFI_ZBOOT_SIGNING
-       bool "Sign the EFI decompressor for UEFI secure boot"
-       depends on EFI_ZBOOT
-       help
-         Use the 'sbsign' command line tool (which must exist on the host
-         path) to sign both the EFI decompressor PE/COFF image, as well as the
-         encapsulated PE/COFF image, which is subsequently compressed and
-         wrapped by the former image.
-
-config EFI_ZBOOT_SIGNING_CERT
-       string "Certificate to use for signing the compressed EFI boot image"
-       depends on EFI_ZBOOT_SIGNING
-
-config EFI_ZBOOT_SIGNING_KEY
-       string "Private key to use for signing the compressed EFI boot image"
-       depends on EFI_ZBOOT_SIGNING
-
 config EFI_ARMSTUB_DTB_LOADER
        bool "Enable the DTB loader"
        depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
index 3359ae2..7c48c38 100644 (file)
@@ -63,7 +63,7 @@ static bool __init efi_virtmap_init(void)
 
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
-               if (md->virt_addr == 0)
+               if (md->virt_addr == U64_MAX)
                        return false;
 
                ret = efi_create_mapping(&efi_mm, md);
index 9624735..3ecdc43 100644 (file)
@@ -271,6 +271,8 @@ static __init int efivar_ssdt_load(void)
                        acpi_status ret = acpi_load_table(data, NULL);
                        if (ret)
                                pr_err("failed to load table: %u\n", ret);
+                       else
+                               continue;
                } else {
                        pr_err("failed to get var data: 0x%lx\n", status);
                }
index 35f234a..3340b38 100644 (file)
@@ -20,22 +20,11 @@ zboot-size-len-y                    := 4
 zboot-method-$(CONFIG_KERNEL_GZIP)     := gzip
 zboot-size-len-$(CONFIG_KERNEL_GZIP)   := 0
 
-quiet_cmd_sbsign = SBSIGN  $@
-      cmd_sbsign = sbsign --out $@ $< \
-                  --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \
-                  --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT)
-
-$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
-       $(call if_changed,sbsign)
-
-ZBOOT_PAYLOAD-y                                 := $(EFI_ZBOOT_PAYLOAD)
-ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed
-
-$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE
+$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
        $(call if_changed,$(zboot-method-y))
 
 OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \
-                        --rename-section .data=.gzdata,load,alloc,readonly,contents
+                         --rename-section .data=.gzdata,load,alloc,readonly,contents
 $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
        $(call if_changed,objcopy)
 
@@ -53,18 +42,8 @@ LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
 $(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE
        $(call if_changed,ld)
 
-ZBOOT_EFI-y                            := vmlinuz.efi
-ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED)   := vmlinuz.efi.unsigned
-
-OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary
-$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE
+OBJCOPYFLAGS_vmlinuz.efi := -O binary
+$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
        $(call if_changed,objcopy)
 
 targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
-
-ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),)
-$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE
-       $(call if_changed,sbsign)
-endif
-
-targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned
index 4f4d98e..70e9789 100644 (file)
@@ -313,16 +313,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
 
                        /*
                         * Set the virtual address field of all
-                        * EFI_MEMORY_RUNTIME entries to 0. This will signal
-                        * the incoming kernel that no virtual translation has
-                        * been installed.
+                        * EFI_MEMORY_RUNTIME entries to U64_MAX. This will
+                        * signal the incoming kernel that no virtual
+                        * translation has been installed.
                         */
                        for (l = 0; l < priv.boot_memmap->map_size;
                             l += priv.boot_memmap->desc_size) {
                                p = (void *)priv.boot_memmap->map + l;
 
                                if (p->attribute & EFI_MEMORY_RUNTIME)
-                                       p->virt_addr = 0;
+                                       p->virt_addr = U64_MAX;
                        }
                }
                return EFI_SUCCESS;
index b9ce639..33a7811 100644 (file)
@@ -765,9 +765,9 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
  * relocated by efi_relocate_kernel.
  * On failure, we exit to the firmware via efi_exit instead of returning.
  */
-unsigned long efi_main(efi_handle_t handle,
-                            efi_system_table_t *sys_table_arg,
-                            struct boot_params *boot_params)
+asmlinkage unsigned long efi_main(efi_handle_t handle,
+                                 efi_system_table_t *sys_table_arg,
+                                 struct boot_params *boot_params)
 {
        unsigned long bzimage_addr = (unsigned long)startup_32;
        unsigned long buffer_start, buffer_end;
index 87a6276..93d33f6 100644 (file)
@@ -38,7 +38,8 @@ SECTIONS
        }
 }
 
-PROVIDE(__efistub__gzdata_size = ABSOLUTE(. - __efistub__gzdata_start));
+PROVIDE(__efistub__gzdata_size =
+               ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
 
 PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
 PROVIDE(__data_size = ABSOLUTE(_end - _etext));
index d28e715..d0daacd 100644 (file)
@@ -41,7 +41,7 @@ static bool __init efi_virtmap_init(void)
 
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
-               if (md->virt_addr == 0)
+               if (md->virt_addr == U64_MAX)
                        return false;
 
                ret = efi_create_mapping(&efi_mm, md);
index dd74d2a..433b615 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/sizes.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -20,19 +21,19 @@ static struct efivars *__efivars;
 
 static DEFINE_SEMAPHORE(efivars_lock);
 
-efi_status_t check_var_size(u32 attributes, unsigned long size)
+static efi_status_t check_var_size(u32 attributes, unsigned long size)
 {
        const struct efivar_operations *fops;
 
        fops = __efivars->ops;
 
        if (!fops->query_variable_store)
-               return EFI_UNSUPPORTED;
+               return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
 
        return fops->query_variable_store(attributes, size, false);
 }
-EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR);
 
+static
 efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
 {
        const struct efivar_operations *fops;
@@ -40,11 +41,10 @@ efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size)
        fops = __efivars->ops;
 
        if (!fops->query_variable_store)
-               return EFI_UNSUPPORTED;
+               return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
 
        return fops->query_variable_store(attributes, size, true);
 }
-EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR);
 
 /**
  * efivars_kobject - get the kobject for the registered efivars
index e4fb4cb..5b265a6 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/pinctrl/consumer.h>
@@ -94,7 +95,6 @@ struct tegra_gpio_info {
        struct tegra_gpio_bank                  *bank_info;
        const struct tegra_gpio_soc_config      *soc;
        struct gpio_chip                        gc;
-       struct irq_chip                         ic;
        u32                                     bank_count;
        unsigned int                            *irqs;
 };
@@ -288,6 +288,7 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
        unsigned int gpio = d->hwirq;
 
        tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
+       gpiochip_disable_irq(chip, gpio);
 }
 
 static void tegra_gpio_irq_unmask(struct irq_data *d)
@@ -296,6 +297,7 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
        struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
        unsigned int gpio = d->hwirq;
 
+       gpiochip_enable_irq(chip, gpio);
        tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
 }
 
@@ -598,10 +600,47 @@ static void tegra_gpio_irq_release_resources(struct irq_data *d)
        tegra_gpio_enable(tgi, d->hwirq);
 }
 
+static void tegra_gpio_irq_print_chip(struct irq_data *d, struct seq_file *s)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+       seq_printf(s, dev_name(chip->parent));
+}
+
+static const struct irq_chip tegra_gpio_irq_chip = {
+       .irq_shutdown           = tegra_gpio_irq_shutdown,
+       .irq_ack                = tegra_gpio_irq_ack,
+       .irq_mask               = tegra_gpio_irq_mask,
+       .irq_unmask             = tegra_gpio_irq_unmask,
+       .irq_set_type           = tegra_gpio_irq_set_type,
+#ifdef CONFIG_PM_SLEEP
+       .irq_set_wake           = tegra_gpio_irq_set_wake,
+#endif
+       .irq_print_chip         = tegra_gpio_irq_print_chip,
+       .irq_request_resources  = tegra_gpio_irq_request_resources,
+       .irq_release_resources  = tegra_gpio_irq_release_resources,
+       .flags                  = IRQCHIP_IMMUTABLE,
+};
+
+static const struct irq_chip tegra210_gpio_irq_chip = {
+       .irq_shutdown           = tegra_gpio_irq_shutdown,
+       .irq_ack                = tegra_gpio_irq_ack,
+       .irq_mask               = tegra_gpio_irq_mask,
+       .irq_unmask             = tegra_gpio_irq_unmask,
+       .irq_set_affinity       = tegra_gpio_irq_set_affinity,
+       .irq_set_type           = tegra_gpio_irq_set_type,
+#ifdef CONFIG_PM_SLEEP
+       .irq_set_wake           = tegra_gpio_irq_set_wake,
+#endif
+       .irq_print_chip         = tegra_gpio_irq_print_chip,
+       .irq_request_resources  = tegra_gpio_irq_request_resources,
+       .irq_release_resources  = tegra_gpio_irq_release_resources,
+       .flags                  = IRQCHIP_IMMUTABLE,
+};
+
 #ifdef CONFIG_DEBUG_FS
 
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
 
 static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
 {
@@ -689,18 +728,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
        tgi->gc.ngpio                   = tgi->bank_count * 32;
        tgi->gc.parent                  = &pdev->dev;
 
-       tgi->ic.name                    = "GPIO";
-       tgi->ic.irq_ack                 = tegra_gpio_irq_ack;
-       tgi->ic.irq_mask                = tegra_gpio_irq_mask;
-       tgi->ic.irq_unmask              = tegra_gpio_irq_unmask;
-       tgi->ic.irq_set_type            = tegra_gpio_irq_set_type;
-       tgi->ic.irq_shutdown            = tegra_gpio_irq_shutdown;
-#ifdef CONFIG_PM_SLEEP
-       tgi->ic.irq_set_wake            = tegra_gpio_irq_set_wake;
-#endif
-       tgi->ic.irq_request_resources   = tegra_gpio_irq_request_resources;
-       tgi->ic.irq_release_resources   = tegra_gpio_irq_release_resources;
-
        platform_set_drvdata(pdev, tgi);
 
        if (tgi->soc->debounce_supported)
@@ -733,7 +760,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
        }
 
        irq = &tgi->gc.irq;
-       irq->chip = &tgi->ic;
        irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
        irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq;
        irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec;
@@ -752,7 +778,9 @@ static int tegra_gpio_probe(struct platform_device *pdev)
                if (!irq->parent_domain)
                        return -EPROBE_DEFER;
 
-               tgi->ic.irq_set_affinity = tegra_gpio_irq_set_affinity;
+               gpio_irq_chip_set_chip(irq, &tegra210_gpio_irq_chip);
+       } else {
+               gpio_irq_chip_set_chip(irq, &tegra_gpio_irq_chip);
        }
 
        tgi->regs = devm_platform_ioremap_resource(pdev, 0);
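
The Tegra GPIO hunks follow the generic immutable-irqchip conversion: the irq_chip becomes a const, IRQCHIP_IMMUTABLE-flagged template installed with gpio_irq_chip_set_chip(), and the mask/unmask callbacks pair with gpiochip_disable_irq()/gpiochip_enable_irq() so gpiolib can keep its bookkeeping. A condensed sketch of that shape with hypothetical callbacks (using the generic resource helpers rather than Tegra's own):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void example_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* hardware-specific interrupt masking would go here */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void example_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	/* hardware-specific interrupt unmasking would go here */
}

static const struct irq_chip example_gpio_irq_chip = {
	.name			= "example-gpio",
	.irq_mask		= example_gpio_irq_mask,
	.irq_unmask		= example_gpio_irq_unmask,
	.flags			= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

The template is then attached with gpio_irq_chip_set_chip(&gc->irq, &example_gpio_irq_chip) during probe, in place of pointing gc->irq.chip at a driver-owned, writable struct.
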
index ae9371b..8639a4f 100644 (file)
@@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log;
 #define AMDGPU_RESET_VCE                       (1 << 13)
 #define AMDGPU_RESET_VCE1                      (1 << 14)
 
-#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
-#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
-
 /* max cursor sizes (in pixels) */
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
@@ -1065,7 +1062,6 @@ struct amdgpu_device {
 
        struct work_struct              reset_work;
 
-       uint32_t                                                amdgpu_reset_level_mask;
        bool                            job_hang;
 };
 
index 03bbfaa..0561812 100644 (file)
@@ -134,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-       clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
index 0b0a72c..7e80caa 100644 (file)
@@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id)
 
        lock_srbm(adev, mec, pipe, 0, 0);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL),
+       WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
                CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
index 978d397..84f44f7 100644 (file)
@@ -510,13 +510,13 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
        struct ttm_tt *ttm = bo->tbo.ttm;
        int ret;
 
+       if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
+               return -EINVAL;
+
        ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
        if (unlikely(!ttm->sg))
                return -ENOMEM;
 
-       if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
-               return -EINVAL;
-
        /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
        ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
                                        ttm->num_pages, 0,
index f6d9d5d..d2139ac 100644 (file)
@@ -326,7 +326,10 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
        if (r)
                return r;
 
-       ctx->stable_pstate = current_stable_pstate;
+       if (mgr->adev->pm.stable_pstate_ctx)
+               ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
+       else
+               ctx->stable_pstate = current_stable_pstate;
 
        return 0;
 }
index 6066aeb..de61a85 100644 (file)
@@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
                return PTR_ERR(ent);
        }
 
-       debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
-
        /* Register debugfs entries for amdgpu_ttm */
        amdgpu_ttm_debugfs_init(adev);
        amdgpu_debugfs_pm_init(adev);
index ab8f970..ddaecb2 100644 (file)
@@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+       /*
+        * Per the PMFW team's suggestion, the driver needs to disable the
+        * gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
+        * scenario. Add the missing df cstate disablement here.
+        */
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to disallow df cstate");
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -3202,6 +3210,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
                        return r;
                }
                adev->ip_blocks[i].status.hw = true;
+
+               if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+                       /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
+                        * amdgpu_device_resume() after IP resume.
+                        */
+                       amdgpu_gfx_off_ctrl(adev, false);
+                       DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
+               }
+
        }
 
        return 0;
@@ -4177,6 +4194,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        /* Make sure IB tests flushed */
        flush_delayed_work(&adev->delayed_init_work);
 
+       if (adev->in_s0ix) {
+               /* re-enable gfxoff after IP resume. This re-enables gfxoff after
+                * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
+                */
+               amdgpu_gfx_off_ctrl(adev, true);
+               DRM_DEBUG("will enable gfxoff for the mission mode\n");
+       }
        if (fbcon)
                drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
 
@@ -5210,7 +5234,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        reset_context->job = job;
        reset_context->hive = hive;
-
        /*
         * Build list of devices to reset.
         * In case we are in XGMI hive mode, resort the device list
@@ -5337,11 +5360,8 @@ retry:   /* Rest of adevs pre asic reset from XGMI hive. */
                        amdgpu_ras_resume(adev);
        } else {
                r = amdgpu_do_asic_reset(device_list_handle, reset_context);
-               if (r && r == -EAGAIN) {
-                       set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
-                       adev->asic_reset_res = 0;
+               if (r && r == -EAGAIN)
                        goto retry;
-               }
 
                if (!r && gpu_reset_for_dev_remove)
                        goto recover_end;
@@ -5377,7 +5397,7 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                }
 
-               if (adev->enable_mes)
+               if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
                        amdgpu_mes_self_test(tmp_adev);
 
                if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
@@ -5777,7 +5797,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
        reset_context.reset_req_dev = adev;
        set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
        set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-       set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
        adev->no_hw_access = true;
        r = amdgpu_device_pre_asic_reset(adev, &reset_context);
index 46c9933..cd968e7 100644 (file)
@@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-               clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
                r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
                if (r)
index fe23e09..bf1ff8f 100644 (file)
@@ -344,6 +344,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->ver = adev->mes.ucode_fw_version[1];
                fw_info->feature = 0;
                break;
+       case AMDGPU_INFO_FW_IMU:
+               fw_info->ver = adev->gfx.imu_fw_version;
+               fw_info->feature = 0;
+               break;
        default:
                return -EINVAL;
        }
@@ -1520,6 +1524,15 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
                           fw_info.feature, fw_info.ver);
        }
 
+       /* IMU */
+       query_fw.fw_type = AMDGPU_INFO_FW_IMU;
+       query_fw.index = 0;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
        /* PSP SOS */
        query_fw.fw_type = AMDGPU_INFO_FW_SOS;
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
index 2dad7aa..a4b47e1 100644 (file)
@@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-               clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
                amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
        }
@@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
 
 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
 {
+       if (amdgpu_sriov_vf(adev)) {
+               switch (adev->ip_versions[MP0_HWIP][0]) {
+               case IP_VERSION(13, 0, 2):
+                       return true;
+               default:
+                       return false;
+               }
+       }
+
+       if (adev->asic_type == CHIP_IP_DISCOVERY) {
+               switch (adev->ip_versions[MP0_HWIP][0]) {
+               case IP_VERSION(13, 0, 0):
+               case IP_VERSION(13, 0, 10):
+                       return true;
+               default:
+                       return false;
+               }
+       }
+
        return adev->asic_type == CHIP_VEGA10 ||
                adev->asic_type == CHIP_VEGA20 ||
                adev->asic_type == CHIP_ARCTURUS ||
@@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
            !amdgpu_ras_asic_supported(adev))
                return;
 
-       /* If driver run on sriov guest side, only enable ras for aldebaran */
-       if (amdgpu_sriov_vf(adev) &&
-               adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
-               return;
-
        if (!adev->gmc.xgmi.connected_to_cpu) {
                if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
                        dev_info(adev->dev, "MEM ECC is active.\n");
index 9da5ead..f778466 100644 (file)
@@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
 {
        int ret = 0;
 
-       adev->amdgpu_reset_level_mask = 0x1;
-
        switch (adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(13, 0, 2):
                ret = aldebaran_reset_init(adev);
@@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
 {
        struct amdgpu_reset_handler *reset_handler = NULL;
 
-       if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
-               return -ENOSYS;
-
-       if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
-               return -ENOSYS;
-
        if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
                reset_handler = adev->reset_cntl->get_reset_handler(
                        adev->reset_cntl, reset_context);
@@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
        int ret;
        struct amdgpu_reset_handler *reset_handler = NULL;
 
-       if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
-               return -ENOSYS;
-
-       if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
-               return -ENOSYS;
-
        if (adev->reset_cntl)
                reset_handler = adev->reset_cntl->get_reset_handler(
                        adev->reset_cntl, reset_context);
index f5318fe..f4a501f 100644 (file)
@@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS {
 
        AMDGPU_NEED_FULL_RESET = 0,
        AMDGPU_SKIP_HW_RESET = 1,
-       AMDGPU_SKIP_MODE2_RESET = 2,
-       AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
+       AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
 };
 
 struct amdgpu_reset_context {
index 3e316b0..d3558c3 100644 (file)
@@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 {
        ktime_t deadline = ktime_add_us(ktime_get(), 10000);
 
-       if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
-               return false;
-
        if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
                return false;
 
index dc262d2..57277b1 100644 (file)
@@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
        while (cursor.remaining) {
                amdgpu_res_next(&cursor, cursor.size);
 
+               if (!cursor.remaining)
+                       break;
+
                /* ttm_resource_ioremap only supports contiguous memory */
                if (end != cursor.start)
                        return false;
index dd0bc64..5cb62e6 100644 (file)
@@ -698,6 +698,7 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
 FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
 FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
+FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
 FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
 FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
@@ -719,7 +720,8 @@ static struct attribute *fw_attrs[] = {
        &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
        &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
        &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
-       &dev_attr_dmcu_fw_version.attr, NULL
+       &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
+       NULL
 };
 
 static const struct attribute_group fw_attr_group = {
index e4af40b..c73abe5 100644 (file)
@@ -547,6 +547,7 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU,      adev->gfx.imu_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
                            adev->psp.asd_context.bin_desc.fw_version);
@@ -726,6 +727,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
 
+       if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+               /* VF MMIO access (except mailbox range) from CPU
+                * will be blocked during sriov runtime
+                */
+               adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
        /* we have the ability to check now */
        if (amdgpu_sriov_vf(adev)) {
                switch (adev->asic_type) {
index d94c31e..49c4347 100644 (file)
@@ -31,6 +31,7 @@
 #define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT  (1 << 5) /* MMIO write access is not allowed in sriov runtime */
 
 /* flags for indirect register access path supported by rlcg for sriov */
 #define AMDGPU_RLCG_GC_WRITE_LEGACY    (0x8 << 28)
@@ -297,6 +298,9 @@ struct amdgpu_video_codec_info;
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
 static inline bool is_virtual_machine(void)
 {
 #if defined(CONFIG_X86)
index 83b0c5d..2291aa1 100644 (file)
@@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
         */
 #ifdef CONFIG_X86_64
        if (amdgpu_vm_update_mode == -1) {
-               if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+               /* For ASICs with VF MMIO access protection,
+                * avoid using the CPU for VM table updates.
+                */
+               if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+                   !amdgpu_sriov_vf_mmio_access_protection(adev))
                        adev->vm_manager.vm_update_mode =
                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
                else
index 2b0669c..69e105f 100644 (file)
@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
                                   DMA_RESV_USAGE_BOOKKEEP);
        }
 
-       if (fence && !p->immediate)
+       if (fence && !p->immediate) {
+               /*
+                * Most hw generations now have a separate queue for page table
+                * updates, but when the queue is shared with userspace we need
+                * the extra CPU round trip to correctly flush the TLB.
+                */
+               set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
                swap(*fence, f);
+       }
        dma_fence_put(f);
        return 0;
 
index e78e4c2..6c97148 100644 (file)
@@ -70,6 +70,7 @@ enum amd_sriov_ucode_engine_id {
        AMD_SRIOV_UCODE_ID_RLC_SRLS,
        AMD_SRIOV_UCODE_ID_MEC,
        AMD_SRIOV_UCODE_ID_MEC2,
+       AMD_SRIOV_UCODE_ID_IMU,
        AMD_SRIOV_UCODE_ID_SOS,
        AMD_SRIOV_UCODE_ID_ASD,
        AMD_SRIOV_UCODE_ID_TA_RAS,
index 2511097..0fecc5b 100644 (file)
@@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
                WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
 
                /* Enable trap for each kfd vmid. */
-               data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
+               data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
                data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
        }
        soc21_grbm_select(adev, 0, 0, 0, 0);
@@ -5051,6 +5051,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
        case IP_VERSION(11, 0, 2):
+       case IP_VERSION(11, 0, 3):
                amdgpu_gfx_off_ctrl(adev, enable);
                break;
        case IP_VERSION(11, 0, 1):
@@ -5076,6 +5077,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
        case IP_VERSION(11, 0, 0):
        case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 2):
+       case IP_VERSION(11, 0, 3):
                gfx_v11_0_update_gfx_clock_gating(adev,
                                state ==  AMD_CG_STATE_GATE);
                break;
index 846ccb6..66dfb57 100644 (file)
@@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;
+       unsigned char hub_ip = 0;
+
+       hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+                  GC_HWIP : MMHUB_HWIP;
 
        spin_lock(&adev->gmc.invalidate_lock);
        /*
@@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means semaphore acquire */
-                       tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                                           hub->eng_distance * eng);
+                       tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+                                           hub->eng_distance * eng, hub_ip);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
@@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }
 
-       WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+       WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
 
        /* Wait for ACK with a delay.*/
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
-                                   hub->eng_distance * eng);
+               tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+                                   hub->eng_distance * eng, hub_ip);
                tmp &= 1 << vmid;
                if (tmp)
                        break;
@@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
-               WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                             hub->eng_distance * eng, 0);
+               WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+                             hub->eng_distance * eng, 0, hub_ip);
 
        /* Issue additional private vm invalidation to MMHUB */
        if ((vmhub != AMDGPU_GFXHUB_0) &&
index 5cec6b2..f141fad 100644 (file)
@@ -98,7 +98,14 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
        struct amdgpu_device *adev = mes->adev;
        struct amdgpu_ring *ring = &mes->ring;
        unsigned long flags;
+       signed long timeout = adev->usec_timeout;
 
+       if (amdgpu_emu_mode) {
+               timeout *= 100;
+       } else if (amdgpu_sriov_vf(adev)) {
+               /* Worst case in SR-IOV, where all of the other 15 VFs time out: each VF needs about 600ms */
+               timeout = 15 * 600 * 1000;
+       }
        BUG_ON(size % 4 != 0);
 
        spin_lock_irqsave(&mes->ring_lock, flags);
@@ -118,7 +125,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
        DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
 
        r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
-                     adev->usec_timeout * (amdgpu_emu_mode ? 100 : 1));
+                     timeout);
        if (r < 1) {
                DRM_ERROR("MES failed to response msg=%d\n",
                          x_pkt->header.opcode);
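The hunk above replaces the single emulation-mode multiplier with a three-way timeout choice. Here is a short sketch of that selection, reusing the constants from the diff; the helper name and parameters are stand-ins for the driver state it would read.

#include <stdbool.h>

static long mes_poll_timeout(long usec_timeout, bool emu_mode, bool sriov_vf)
{
        if (emu_mode)
                return usec_timeout * 100;      /* emulation runs roughly 100x slower */
        if (sriov_vf)
                return 15L * 600 * 1000;        /* worst case: 15 other VFs, ~600 ms each */
        return usec_timeout;                    /* bare metal: unchanged */
}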
@@ -1156,6 +1163,42 @@ static int mes_v11_0_sw_fini(void *handle)
        return 0;
 }
 
+static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
+{
+       uint32_t data;
+       int i;
+
+       mutex_lock(&adev->srbm_mutex);
+       soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
+
+       /* disable the queue if it's active */
+       if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
+               WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+       }
+       data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+       data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+                               DOORBELL_EN, 0);
+       data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+                               DOORBELL_HIT, 1);
+       WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+       WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+       WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
+       WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
+       WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
+
+       soc21_grbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+
+       adev->mes.ring.sched.ready = false;
+}
+
 static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
 {
        uint32_t tmp;
@@ -1207,6 +1250,9 @@ failure:
 
 static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
 {
+       if (adev->mes.ring.sched.ready)
+               mes_v11_0_kiq_dequeue_sched(adev);
+
        mes_v11_0_enable(adev, false);
        return 0;
 }
@@ -1262,9 +1308,6 @@ failure:
 
 static int mes_v11_0_hw_fini(void *handle)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       adev->mes.ring.sched.ready = false;
        return 0;
 }
 
@@ -1296,7 +1339,8 @@ static int mes_v11_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!amdgpu_in_reset(adev))
+       if (!amdgpu_in_reset(adev) &&
+           (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
                amdgpu_mes_self_test(adev);
 
        return 0;
index 4d304f2..998b5d1 100644 (file)
@@ -32,8 +32,6 @@
 #include "gc/gc_10_1_0_offset.h"
 #include "soc15_common.h"
 
-#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid                      0x064d
-#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX             0
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid                       0x0070
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX              0
 
@@ -574,7 +572,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(2, 1, 1):
        case IP_VERSION(2, 1, 2):
-               def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
                def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
                break;
        default:
@@ -608,8 +605,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(2, 1, 1):
        case IP_VERSION(2, 1, 2):
-               if (def != data)
-                       WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
                if (def1 != data1)
                        WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
                break;
@@ -634,8 +629,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(2, 1, 1):
        case IP_VERSION(2, 1, 2):
-               def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
-               break;
+               /* There is no ATCL2 in MMHUB for 2.1.x */
+               return;
        default:
                def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
                break;
@@ -646,18 +641,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
        else
                data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
 
-       if (def != data) {
-               switch (adev->ip_versions[MMHUB_HWIP][0]) {
-               case IP_VERSION(2, 1, 0):
-               case IP_VERSION(2, 1, 1):
-               case IP_VERSION(2, 1, 2):
-                       WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
-                       break;
-               default:
-                       WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
-                       break;
-               }
-       }
+       if (def != data)
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
 }
 
 static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
@@ -695,7 +680,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(2, 1, 1):
        case IP_VERSION(2, 1, 2):
-               data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
+               /* There is no ATCL2 in MMHUB for 2.1.x. Keep the status
+                * based on DAGB
+                */
+               data = MM_ATC_L2_MISC_CG__ENABLE_MASK;
                data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
                break;
        default:
index a2f04b2..12906ba 100644 (file)
@@ -290,7 +290,6 @@ flr_done:
                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-               clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
index a977f00..e07757e 100644 (file)
@@ -317,7 +317,6 @@ flr_done:
                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-               clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
index fd14fa9..288c414 100644 (file)
@@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-               clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
 
                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
index 298fa11..1122bd4 100644 (file)
@@ -1417,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
                WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
                if (!amdgpu_sriov_vf(adev)) {
-                       ring = &adev->sdma.instance[i].ring;
-                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
-                               ring->use_doorbell, ring->doorbell_index,
-                               adev->doorbell_index.sdma_doorbell_range);
-
                        /* unhalt engine */
                        temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                        temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
index 7aa570c..81a6d5b 100644 (file)
 #include "amdgpu_psp.h"
 #include "amdgpu_xgmi.h"
 
+static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+#if 0
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&
+           adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
+               return true;
+#endif
+       return false;
+}
+
 static struct amdgpu_reset_handler *
 sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
                            struct amdgpu_reset_context *reset_context)
 {
        struct amdgpu_reset_handler *handler;
-       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
 
        if (reset_context->method != AMD_RESET_METHOD_NONE) {
                list_for_each_entry(handler, &reset_ctl->reset_handlers,
@@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
                        if (handler->reset_method == reset_context->method)
                                return handler;
                }
-       } else {
-               list_for_each_entry(handler, &reset_ctl->reset_handlers,
+       }
+
+       if (sienna_cichlid_is_mode2_default(reset_ctl)) {
+               list_for_each_entry(handler, &reset_ctl->reset_handlers,
                                     handler_list) {
-                       if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
-                           adev->pm.fw_version >= 0x3a5500 &&
-                           !amdgpu_sriov_vf(adev)) {
-                               reset_context->method = AMD_RESET_METHOD_MODE2;
+                       if (handler->reset_method == AMD_RESET_METHOD_MODE2)
                                return handler;
-                       }
                }
        }
 
index 183024d..e3b2b6b 100644 (file)
@@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle)
        return 0;
 }
 
+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       /* sdma doorbell range is programmed by the hypervisor */
+       if (!amdgpu_sriov_vf(adev)) {
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
+                               true, adev->doorbell_index.sdma_engine[i] << 1,
+                               adev->doorbell_index.sdma_doorbell_range);
+               }
+       }
+}
+
 static int soc15_common_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle)
 
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
+       /* HW doorbell routing policy: doorbell writes that fall
+        * outside the SDMA/IH/MM/ACV ranges are routed to CP, so
+        * the SDMA doorbell range must be initialized before CP
+        * IP block init and its ring test. IH init already
+        * happens before CP.
+        */
+       soc15_sdma_doorbell_range_init(adev);
 
        return 0;
 }
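The helper added above is essentially a guarded loop over the SDMA instances. Below is a stand-alone sketch, with a hypothetical callback type in place of the nbio sdma_doorbell_range hook, mirroring the SR-IOV guard and the index << 1 computation from the hunk.

#include <stdbool.h>

typedef void (*doorbell_range_fn)(int instance, bool use_doorbell,
                                  int doorbell_index, int doorbell_range);

static void init_sdma_doorbell_ranges(doorbell_range_fn program,
                                      int num_instances,
                                      const int *engine_doorbell_index,
                                      int doorbell_range, bool sriov_vf)
{
        int i;

        if (sriov_vf)
                return;         /* the range is owned by the hypervisor under SR-IOV */

        for (i = 0; i < num_instances; i++)
                program(i, true, engine_doorbell_index[i] << 1, doorbell_range);
}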
index 795706b..e080440 100644 (file)
@@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
        case IP_VERSION(11, 0, 0):
                return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
        case IP_VERSION(11, 0, 2):
+       case IP_VERSION(11, 0, 3):
                return false;
        default:
                return true;
@@ -636,7 +637,11 @@ static int soc21_common_early_init(void *handle)
                break;
        case IP_VERSION(11, 0, 3):
                adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
-                       AMD_CG_SUPPORT_JPEG_MGCG;
+                       AMD_CG_SUPPORT_JPEG_MGCG |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_REPEATER_FGCG |
+                       AMD_CG_SUPPORT_GFX_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
index cd5f8b2..8bfdfd0 100644 (file)
@@ -795,6 +795,102 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
        },
 };
 
+static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
+       {
+               /* TCP L1 Cache per CU */
+               .cache_size = 16,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                               CRAT_CACHE_FLAGS_DATA_CACHE |
+                               CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 1,
+       },
+       {
+               /* Scalar L1 Instruction Cache per SQC */
+               .cache_size = 32,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                               CRAT_CACHE_FLAGS_INST_CACHE |
+                               CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* Scalar L1 Data Cache per SQC */
+               .cache_size = 16,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                               CRAT_CACHE_FLAGS_DATA_CACHE |
+                               CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* GL1 Data Cache per SA */
+               .cache_size = 128,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                               CRAT_CACHE_FLAGS_DATA_CACHE |
+                               CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* L2 Data Cache per GPU (Total Tex Cache) */
+               .cache_size = 256,
+               .cache_level = 2,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                               CRAT_CACHE_FLAGS_DATA_CACHE |
+                               CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+};
+
+static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
+       {
+               /* TCP L1 Cache per CU */
+               .cache_size = 16,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                         CRAT_CACHE_FLAGS_DATA_CACHE |
+                         CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 1,
+       },
+       {
+               /* Scalar L1 Instruction Cache per SQC */
+               .cache_size = 32,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                         CRAT_CACHE_FLAGS_INST_CACHE |
+                         CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* Scalar L1 Data Cache per SQC */
+               .cache_size = 16,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                         CRAT_CACHE_FLAGS_DATA_CACHE |
+                         CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* GL1 Data Cache per SA */
+               .cache_size = 128,
+               .cache_level = 1,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                         CRAT_CACHE_FLAGS_DATA_CACHE |
+                         CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+       {
+               /* L2 Data Cache per GPU (Total Tex Cache) */
+               .cache_size = 256,
+               .cache_level = 2,
+               .flags = (CRAT_CACHE_FLAGS_ENABLED |
+                         CRAT_CACHE_FLAGS_DATA_CACHE |
+                         CRAT_CACHE_FLAGS_SIMD_CACHE),
+               .num_cu_shared = 2,
+       },
+};
+
 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
                struct crat_subtype_computeunit *cu)
 {
@@ -1514,11 +1610,17 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
                        num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
                        break;
                case IP_VERSION(10, 3, 3):
-               case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
-               case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
                        pcache_info = yellow_carp_cache_info;
                        num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
                        break;
+               case IP_VERSION(10, 3, 6):
+                       pcache_info = gc_10_3_6_cache_info;
+                       num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info);
+                       break;
+               case IP_VERSION(10, 3, 7):
+                       pcache_info = gfx1037_cache_info;
+                       num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info);
+                       break;
                case IP_VERSION(11, 0, 0):
                case IP_VERSION(11, 0, 1):
                case IP_VERSION(11, 0, 2):
index dfd3be4..e6854f7 100644 (file)
@@ -1369,7 +1369,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
 {
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        const struct drm_format_info *info = drm_format_info(format);
-       struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id;
+       int i;
 
        enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
 
@@ -1386,49 +1386,13 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
                return true;
        }
 
-       /* check if swizzle mode is supported by this version of DCN */
-       switch (asic_id.chip_family) {
-       case FAMILY_SI:
-       case FAMILY_CI:
-       case FAMILY_KV:
-       case FAMILY_CZ:
-       case FAMILY_VI:
-               /* asics before AI does not have modifier support */
-               return false;
-       case FAMILY_AI:
-       case FAMILY_RV:
-       case FAMILY_NV:
-       case FAMILY_VGH:
-       case FAMILY_YELLOW_CARP:
-       case AMDGPU_FAMILY_GC_10_3_6:
-       case AMDGPU_FAMILY_GC_10_3_7:
-               switch (AMD_FMT_MOD_GET(TILE, modifier)) {
-               case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_D:
-                       return true;
-               default:
-                       return false;
-               }
-               break;
-       case AMDGPU_FAMILY_GC_11_0_0:
-       case AMDGPU_FAMILY_GC_11_0_1:
-               switch (AMD_FMT_MOD_GET(TILE, modifier)) {
-               case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
-               case AMD_FMT_MOD_TILE_GFX9_64K_D:
-                       return true;
-               default:
-                       return false;
-               }
-               break;
-       default:
-               ASSERT(0); /* Unknown asic */
-               break;
+       /* Check that the modifier is on the list of the plane's supported modifiers. */
+       for (i = 0; i < plane->modifier_count; i++) {
+               if (modifier == plane->modifiers[i])
+                       break;
        }
+       if (i == plane->modifier_count)
+               return false;
 
        /*
         * For D swizzle the canonical modifier depends on the bpp, so check
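The replacement logic above boils down to a linear membership test against the plane's advertised modifier list. A minimal stand-alone version, with plain arrays in place of the DRM plane structure:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool modifier_supported(uint64_t modifier,
                               const uint64_t *modifiers, size_t count)
{
        size_t i;

        for (i = 0; i < count; i++)
                if (modifiers[i] == modifier)
                        return true;    /* found in the advertised list */
        return false;                   /* not advertised: reject the framebuffer */
}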
index d732b6f..a7e0001 100644 (file)
@@ -1270,16 +1270,6 @@ void dcn20_pipe_control_lock(
                                        lock,
                                        &hw_locks,
                                        &inst_flags);
-       } else if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-               union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
-               hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
-               hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
-               hw_lock_cmd.bits.lock_pipe = 1;
-               hw_lock_cmd.bits.otg_inst = pipe->stream_res.tg->inst;
-               hw_lock_cmd.bits.lock = lock;
-               if (!lock)
-                       hw_lock_cmd.bits.should_release = 1;
-               dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
        } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
                if (lock)
                        pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
@@ -1856,7 +1846,7 @@ void dcn20_post_unlock_program_front_end(
 
                        for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
                                        && hubp->funcs->hubp_is_flip_pending(hubp); j++)
-                               mdelay(1);
+                               udelay(1);
                }
        }
 
index d51d0c4..b03a781 100644 (file)
@@ -200,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                if (!pipe->stream)
-                       return false;
+                       continue;
 
                if (!pipe->plane_state)
                        return false;
index d70838e..ca7d240 100644 (file)
@@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
index e85364d..5cb3e86 100644 (file)
@@ -262,8 +262,9 @@ struct kfd2kgd_calls {
                                uint32_t queue_id);
 
        int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
-                               uint32_t reset_type, unsigned int timeout,
-                               uint32_t pipe_id, uint32_t queue_id);
+                               enum kfd_preempt_type reset_type,
+                               unsigned int timeout, uint32_t pipe_id,
+                               uint32_t queue_id);
 
        bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
 
index 948cc75..236657e 100644 (file)
@@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
        if (adev->pm.sysfs_initialized)
                return 0;
 
+       INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
        if (adev->pm.dpm_enabled == 0)
                return 0;
 
-       INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
        adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
                                                                   DRIVER_NAME, adev,
                                                                   hwmon_groups);
index 190af79..dad3e37 100644 (file)
@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
                uint32_t *speed)
 {
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t duty100, duty;
-       uint64_t tmp64;
+       uint32_t current_rpm;
+       uint32_t percent = 0;
 
-       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
-                               CG_FDO_CTRL1, FMAX_DUTY100);
-       duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
-                               CG_THERMAL_STATUS, FDO_PWM_DUTY);
+       if (hwmgr->thermal_controller.fanInfo.bNoFan)
+               return 0;
 
-       if (!duty100)
-               return -EINVAL;
+       if (vega10_get_current_rpm(hwmgr, &current_rpm))
+               return -1;
+
+       if (hwmgr->thermal_controller.
+                       advanceFanControlParameters.usMaxFanRPM != 0)
+               percent = current_rpm * 255 /
+                       hwmgr->thermal_controller.
+                       advanceFanControlParameters.usMaxFanRPM;
 
-       tmp64 = (uint64_t)duty * 255;
-       do_div(tmp64, duty100);
-       *speed = MIN((uint32_t)tmp64, 255);
+       *speed = MIN(percent, 255);
 
        return 0;
 }
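The rewritten PWM read path above no longer samples the FDO duty registers; it scales the measured fan RPM into the 0-255 range expected by the hwmon interface. A small stand-alone sketch of that arithmetic (the function name and types are illustrative only): with a 3300 RPM maximum, a reading of 1500 RPM reports roughly 115.

#include <stdint.h>

static uint32_t fan_rpm_to_pwm255(uint32_t current_rpm, uint32_t max_rpm)
{
        uint32_t pwm;

        if (!max_rpm)
                return 0;               /* unknown maximum: report 0 instead of dividing by zero */

        pwm = current_rpm * 255u / max_rpm;
        return pwm > 255u ? 255u : pwm; /* clamp, mirroring MIN(percent, 255) above */
}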
index 13c5c7f..4fe75dd 100644 (file)
@@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
 
        ret = smu_enable_thermal_alert(smu);
        if (ret) {
-               dev_err(adev->dev, "Failed to enable thermal alert!\n");
-               return ret;
+               dev_err(adev->dev, "Failed to enable thermal alert!\n");
+               return ret;
        }
 
        ret = smu_notify_display_change(smu);
index 063f4a7..b76f0f7 100644 (file)
@@ -25,7 +25,7 @@
 #define SMU13_DRIVER_IF_V13_0_0_H
 
 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x24
+#define PPTABLE_VERSION 0x26
 
 #define NUM_GFXCLK_DPM_LEVELS    16
 #define NUM_SOCCLK_DPM_LEVELS    8
 #define FEATURE_SPARE_63_BIT                  63
 #define NUM_FEATURES                          64
 
+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM      ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+                                                                       (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+                                                                       (1 << FEATURE_DPM_UCLK_BIT) | \
+                                                                       (1 << FEATURE_DPM_FCLK_BIT) | \
+                                                                       (1 << FEATURE_DPM_SOCCLK_BIT) | \
+                                                                       (1 << FEATURE_DPM_MP0CLK_BIT) | \
+                                                                       (1 << FEATURE_DPM_LINK_BIT) | \
+                                                                       (1 << FEATURE_DPM_DCN_BIT) | \
+                                                                       (1 << FEATURE_DS_GFXCLK_BIT) | \
+                                                                       (1 << FEATURE_DS_SOCCLK_BIT) | \
+                                                                       (1 << FEATURE_DS_FCLK_BIT) | \
+                                                                       (1 << FEATURE_DS_LCLK_BIT) | \
+                                                                       (1 << FEATURE_DS_DCFCLK_BIT) | \
+                                                                       (1 << FEATURE_DS_UCLK_BIT))
+
 //For use with feature control messages
 typedef enum {
   FEATURE_PWR_ALL,
@@ -133,6 +149,7 @@ typedef enum {
 #define DEBUG_OVERRIDE_DISABLE_DFLL                    0x00000200
 #define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE      0x00000400
 #define DEBUG_OVERRIDE_DFLL_MASTER_MODE                0x00000800
+#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE           0x00001000
 
 // VR Mapping Bit Defines
 #define VR_MAPPING_VR_SELECT_MASK  0x01
@@ -262,15 +279,15 @@ typedef enum {
 } I2cControllerPort_e;
 
 typedef enum {
-  I2C_CONTROLLER_NAME_VR_GFX = 0,
-  I2C_CONTROLLER_NAME_VR_SOC,
-  I2C_CONTROLLER_NAME_VR_VMEMP,
-  I2C_CONTROLLER_NAME_VR_VDDIO,
-  I2C_CONTROLLER_NAME_LIQUID0,
-  I2C_CONTROLLER_NAME_LIQUID1,
-  I2C_CONTROLLER_NAME_PLX,
-  I2C_CONTROLLER_NAME_OTHER,
-  I2C_CONTROLLER_NAME_COUNT,
+       I2C_CONTROLLER_NAME_VR_GFX = 0,
+       I2C_CONTROLLER_NAME_VR_SOC,
+       I2C_CONTROLLER_NAME_VR_VMEMP,
+       I2C_CONTROLLER_NAME_VR_VDDIO,
+       I2C_CONTROLLER_NAME_LIQUID0,
+       I2C_CONTROLLER_NAME_LIQUID1,
+       I2C_CONTROLLER_NAME_PLX,
+       I2C_CONTROLLER_NAME_FAN_INTAKE,
+       I2C_CONTROLLER_NAME_COUNT,
 } I2cControllerName_e;
 
 typedef enum {
@@ -282,16 +299,17 @@ typedef enum {
   I2C_CONTROLLER_THROTTLER_LIQUID0,
   I2C_CONTROLLER_THROTTLER_LIQUID1,
   I2C_CONTROLLER_THROTTLER_PLX,
+  I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
   I2C_CONTROLLER_THROTTLER_INA3221,
   I2C_CONTROLLER_THROTTLER_COUNT,
 } I2cControllerThrottler_e;
 
 typedef enum {
-  I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
-  I2C_CONTROLLER_PROTOCOL_VR_IR35217,
-  I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
-  I2C_CONTROLLER_PROTOCOL_INA3221,
-  I2C_CONTROLLER_PROTOCOL_COUNT,
+       I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
+       I2C_CONTROLLER_PROTOCOL_VR_IR35217,
+       I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
+       I2C_CONTROLLER_PROTOCOL_INA3221,
+       I2C_CONTROLLER_PROTOCOL_COUNT,
 } I2cControllerProtocol_e;
 
 typedef struct {
@@ -658,13 +676,20 @@ typedef struct {
 
 #define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
 
+typedef enum {
+       FAN_MODE_AUTO = 0,
+       FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
 
 typedef struct {
   uint32_t FeatureCtrlMask;
 
   //Voltage control
   int16_t                VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
-  uint16_t               reserved[2];
+  uint16_t               VddGfxVmax;         // in mV
+
+  uint8_t                IdlePwrSavingFeaturesCtrl;
+  uint8_t                RuntimePwrSavingFeaturesCtrl;
 
   //Frequency changes
   int16_t                GfxclkFmin;           // MHz
@@ -674,7 +699,7 @@ typedef struct {
 
   //PPT
   int16_t                Ppt;         // %
-  int16_t                reserved1;
+  int16_t                Tdc;
 
   //Fan control
   uint8_t                FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
@@ -701,16 +726,19 @@ typedef struct {
   uint32_t FeatureCtrlMask;
 
   int16_t VoltageOffsetPerZoneBoundary;
-  uint16_t               reserved[2];
+  uint16_t               VddGfxVmax;         // in mV
+
+  uint8_t                IdlePwrSavingFeaturesCtrl;
+  uint8_t                RuntimePwrSavingFeaturesCtrl;
 
-  uint16_t               GfxclkFmin;           // MHz
-  uint16_t               GfxclkFmax;           // MHz
+  int16_t               GfxclkFmin;           // MHz
+  int16_t               GfxclkFmax;           // MHz
   uint16_t               UclkFmin;             // MHz
   uint16_t               UclkFmax;             // MHz
 
   //PPT
   int16_t                Ppt;         // %
-  int16_t                reserved1;
+  int16_t                Tdc;
 
   uint8_t                FanLinearPwmPoints;
   uint8_t                FanLinearTempPoints;
@@ -857,7 +885,8 @@ typedef struct {
   uint16_t  FanStartTempMin;
   uint16_t  FanStartTempMax;
 
-  uint32_t Spare[12];
+  uint16_t  PowerMinPpt0[POWER_SOURCE_COUNT];
+  uint32_t Spare[11];
 
 } MsgLimits_t;
 
@@ -1041,7 +1070,17 @@ typedef struct {
   uint32_t        GfxoffSpare[15];
 
   // GFX GPO
-  uint32_t        GfxGpoSpare[16];
+  uint32_t        DfllBtcMasterScalerM;
+  int32_t         DfllBtcMasterScalerB;
+  uint32_t        DfllBtcSlaveScalerM;
+  int32_t         DfllBtcSlaveScalerB;
+
+  uint32_t        DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+  uint32_t        DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+
+  uint32_t        DfllL2FrequencyBoostM; //Unitless (float)
+  uint32_t        DfllL2FrequencyBoostB; //In MHz (integer)
+  uint32_t        GfxGpoSpare[8];
 
   // GFX DCS
 
@@ -1114,12 +1153,14 @@ typedef struct {
   uint16_t IntakeTempHighIntakeAcousticLimit;
   uint16_t IntakeTempAcouticLimitReleaseRate;
 
-  uint16_t FanStalledTempLimitOffset;
+  int16_t FanAbnormalTempLimitOffset;
   uint16_t FanStalledTriggerRpm;
-  uint16_t FanAbnormalTriggerRpm;
-  uint16_t FanPadding;
+  uint16_t FanAbnormalTriggerRpmCoeff;
+  uint16_t FanAbnormalDetectionEnable;
 
-  uint32_t     FanSpare[14];
+  uint8_t      FanIntakeSensorSupport;
+  uint8_t      FanIntakePadding[3];
+  uint32_t     FanSpare[13];
 
   // SECTION: VDD_GFX AVFS
 
@@ -1198,8 +1239,13 @@ typedef struct {
   int16_t     TotalBoardPowerM;
   int16_t     TotalBoardPowerB;
 
+  //PMFW-11158
+  QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
   // SECTION: Sku Reserved
-  uint32_t         Spare[61];
+  uint32_t         Spare[43];
 
   // Padding for MMHUB - do not modify this
   uint32_t     MmHubPadding[8];
@@ -1288,8 +1334,11 @@ typedef struct {
   uint32_t    PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
   uint32_t    BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
 
+  uint8_t     FuseWritePowerMuxPresent;
+  uint8_t     FuseWritePadding[3];
+
   // SECTION: Board Reserved
-  uint32_t     BoardSpare[64];
+  uint32_t     BoardSpare[63];
 
   // SECTION: Structure Padding
 
@@ -1381,7 +1430,7 @@ typedef struct {
   uint16_t AverageTotalBoardPower;
 
   uint16_t AvgTemperature[TEMP_COUNT];
-  uint16_t TempPadding;
+  uint16_t AvgTemperatureFanIntake;
 
   uint8_t  PcieRate               ;
   uint8_t  PcieWidth              ;
@@ -1550,5 +1599,7 @@ typedef struct {
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0            0x5
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3            0x6
 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING  0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9
 
 #endif
index ae2d337..f774017 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 5
+#define PMFW_DRIVER_IF_VERSION 7
 
 typedef struct {
   int32_t value;
@@ -163,8 +163,8 @@ typedef struct {
   uint16_t DclkFrequency;               //[MHz]
   uint16_t MemclkFrequency;             //[MHz]
   uint16_t spare;                       //[centi]
-  uint16_t UvdActivity;                 //[centi]
   uint16_t GfxActivity;                 //[centi]
+  uint16_t UvdActivity;                 //[centi]
 
   uint16_t Voltage[2];                  //[mV] indices: VDDCR_VDD, VDDCR_SOC
   uint16_t Current[2];                  //[mA] indices: VDDCR_VDD, VDDCR_SOC
@@ -199,6 +199,19 @@ typedef struct {
   uint16_t DeviceState;
   uint16_t CurTemp;                     //[centi-Celsius]
   uint16_t spare2;
+
+  uint16_t AverageGfxclkFrequency;
+  uint16_t AverageFclkFrequency;
+  uint16_t AverageGfxActivity;
+  uint16_t AverageSocclkFrequency;
+  uint16_t AverageVclkFrequency;
+  uint16_t AverageVcnActivity;
+  uint16_t AverageDRAMReads;          //Filtered DF Bandwidth::DRAM Reads
+  uint16_t AverageDRAMWrites;         //Filtered DF Bandwidth::DRAM Writes
+  uint16_t AverageSocketPower;        //Filtered value of CurrentSocketPower
+  uint16_t AverageCorePower;          //Filtered of [sum of CorePower[8]])
+  uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency %  per core]
+  uint32_t MetricsCounter;            //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
 } SmuMetrics_t;
 
 typedef struct {
index 9d62ea2..80fb583 100644 (file)
@@ -28,9 +28,9 @@
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
 
index 4450055..9cd0051 100644 (file)
@@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu)
 static int arcturus_set_df_cstate(struct smu_context *smu,
                                  enum pp_df_cstate state)
 {
+       struct amdgpu_device *adev = smu->adev;
        uint32_t smu_version;
        int ret;
 
+       /*
+        * Arcturus does not need the cstate disablement
+        * prerequisite for gpu reset.
+        */
+       if (amdgpu_in_reset(adev) || adev->in_suspend)
+               return 0;
+
        ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu version!\n");
index 619aee5..d30ec30 100644 (file)
@@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu)
 static int aldebaran_set_df_cstate(struct smu_context *smu,
                                   enum pp_df_cstate state)
 {
+       struct amdgpu_device *adev = smu->adev;
+
+       /*
+        * Aldebaran does not need the cstate disablement
+        * prerequisite for gpu reset.
+        */
+       if (amdgpu_in_reset(adev) || adev->in_suspend)
+               return 0;
+
        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
index 93fffdb..43fb102 100644 (file)
@@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
                return 0;
 
        if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
-           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
+           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
                return 0;
 
        /* override pptable_id from driver parameter */
@@ -288,7 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
                break;
        case IP_VERSION(13, 0, 0):
-               smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0;
+       case IP_VERSION(13, 0, 10):
+               smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
                break;
        case IP_VERSION(13, 0, 7):
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7;
@@ -304,9 +306,6 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
        case IP_VERSION(13, 0, 5):
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
                break;
-       case IP_VERSION(13, 0, 10):
-               smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
-               break;
        default:
                dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
                        adev->ip_versions[MP1_HWIP][0]);
@@ -454,9 +453,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
                dev_info(adev->dev, "override pptable id %d\n", pptable_id);
        } else {
                pptable_id = smu->smu_table.boot_values.pp_table_id;
-
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
-                       pptable_id = 6666;
        }
 
        /* force using vbios pptable in sriov mode */
@@ -844,6 +840,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
        case IP_VERSION(13, 0, 5):
        case IP_VERSION(13, 0, 7):
        case IP_VERSION(13, 0, 8):
+       case IP_VERSION(13, 0, 10):
                if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                        return 0;
                if (enable)
index 1d45448..2952932 100644 (file)
@@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           0),
        MSG_MAP(Mode1Reset,                     PPSMC_MSG_Mode1Reset,                  0),
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
+       MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1753,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
+                                    enum pp_df_cstate state)
+{
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_DFCstateControl,
+                                              state,
+                                              NULL);
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1822,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_0_set_mp1_state,
+       .set_df_cstate = smu_v13_0_0_set_df_cstate,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
index c422bf8..c4102cf 100644 (file)
@@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(Mode1Reset,             PPSMC_MSG_Mode1Reset,                  0),
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
        MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
+       MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
 
        return true;
 }
+
+static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
+                                    enum pp_df_cstate state)
+{
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_DFCstateControl,
+                                              state,
+                                              NULL);
+}
+
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_7_set_mp1_state,
+       .set_df_cstate = smu_v13_0_7_set_df_cstate,
 };
 
 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
index d7483c1..083337a 100644 (file)
@@ -105,6 +105,7 @@ struct ps8640 {
        struct gpio_desc *gpio_powerdown;
        struct device_link *link;
        bool pre_enabled;
+       bool need_post_hpd_delay;
 };
 
 static const struct regmap_config ps8640_regmap_config[] = {
@@ -173,14 +174,31 @@ static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wai
 {
        struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
        int status;
+       int ret;
 
        /*
         * Apparently something about the firmware in the chip signals that
         * HPD goes high by reporting GPIO9 as high (even though HPD isn't
         * actually connected to GPIO9).
         */
-       return regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
-                                       status & PS_GPIO9, wait_us / 10, wait_us);
+       ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+                                      status & PS_GPIO9, wait_us / 10, wait_us);
+
+       /*
+        * The first time we see HPD go high after a reset we delay an extra
+        * 50 ms. The best guess is that the MCU is doing "stuff" during this
+        * time (maybe talking to the panel) and we don't want to interrupt it.
+        *
+        * No locking is done around "need_post_hpd_delay". If we're here we
+        * know we're holding a PM Runtime reference and the only other place
+        * that touches this is PM Runtime resume.
+        */
+       if (!ret && ps_bridge->need_post_hpd_delay) {
+               ps_bridge->need_post_hpd_delay = false;
+               msleep(50);
+       }
+
+       return ret;
 }
 
 static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
@@ -381,6 +399,9 @@ static int __maybe_unused ps8640_resume(struct device *dev)
        msleep(50);
        gpiod_set_value(ps_bridge->gpio_reset, 0);
 
+       /* We just reset things, so we need a delay after the first HPD */
+       ps_bridge->need_post_hpd_delay = true;
+
        /*
         * Mystery 200 ms delay for the "MCU to be ready". It's unclear if
         * this is truly necessary since the MCU will already signal that
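
The two ps8640 hunks above work as a pair: runtime resume arms need_post_hpd_delay right after releasing reset, and the HPD wait consumes it exactly once. A condensed sketch of that shape, with illustrative type, register, and bit names:

/* Condensed sketch of the one-shot flag; names here are illustrative only. */
static int wait_hpd(struct ps8640_like *b, unsigned long wait_us)
{
	int status;
	int ret = regmap_read_poll_timeout(b->map, HPD_STATUS_REG, status,
					   status & HPD_BIT, wait_us / 10, wait_us);

	/* resume() sets need_post_hpd_delay = true right after deasserting reset. */
	if (!ret && b->need_post_hpd_delay) {
		b->need_post_hpd_delay = false;	/* consumed exactly once per reset */
		msleep(50);			/* give the MCU time to settle */
	}
	return ret;
}
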
index e3142c8..61c29ce 100644 (file)
@@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev,
        if (drm_WARN_ON(dev, funcs && funcs->destroy))
                return -EINVAL;
 
-       ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+       ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
        if (ret)
                return ret;
 
index c9be61d..47419d1 100644 (file)
@@ -3957,6 +3957,8 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
 
                drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
 
+               intel_dp->frl.is_trained = false;
+
                /* Restart FRL training or fall back to TMDS mode */
                intel_dp_check_frl_training(intel_dp);
        }
index 6d2003d..a821e3d 100644 (file)
@@ -2293,11 +2293,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
        }
 
        if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
-           IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+           IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
                /*
                 * Wa_1607030317:tgl
                 * Wa_1607186500:tgl
-                * Wa_1607297627:tgl,rkl,dg1[a0]
+                * Wa_1607297627:tgl,rkl,dg1[a0],adlp
                 *
                 * On TGL and RKL there are multiple entries for this WA in the
                 * BSpec; some indicate this is an A0-only WA, others indicate
index 6ed5786..744cca5 100644 (file)
@@ -591,8 +591,15 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
                pm_runtime_use_autosuspend(kdev);
        }
 
-       /* Enable by default */
-       pm_runtime_allow(kdev);
+       /*
+        *  FIXME: Temporary hammer to keep autosuspend disabled on lmem-supported platforms.
+        *  As per PCIe spec section 5.3.1.4.1, all iomem read/write requests to a PCIe
+        *  function are unsupported while the PCIe endpoint function is in D3.
+        *  Let's keep i915 autosuspend control 'on' until we fix all known issues
+        *  with lmem access in D3.
+        */
+       if (!IS_DGFX(i915))
+               pm_runtime_allow(kdev);
 
        /*
         * The core calls the driver load handler with an RPM reference held.
index 4e0cbd6..3c9dfdb 100644 (file)
@@ -155,7 +155,7 @@ config DRM_MSM_HDMI
          Compile in support for the HDMI output MSM DRM driver. It can
          be a primary or a secondary display on device. Note that this is used
          only for the direct HDMI output. If the device outputs HDMI data
-         throught some kind of DSI-to-HDMI bridge, this option can be disabled.
+         through some kind of DSI-to-HDMI bridge, this option can be disabled.
 
 config DRM_MSM_HDMI_HDCP
        bool "Enable HDMI HDCP support in MSM DRM driver"
index 55f4433..a5c3d1e 100644 (file)
@@ -91,7 +91,7 @@ struct a6xx_state_memobj {
 static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
 {
        struct a6xx_state_memobj *obj =
-               kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+               kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
 
        if (!obj)
                return NULL;
@@ -813,6 +813,9 @@ static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
 {
        struct msm_gpu_state_bo *snapshot;
 
+       if (!bo->size)
+               return NULL;
+
        snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
        if (!snapshot)
                return NULL;
@@ -1040,8 +1043,13 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
        if (a6xx_state->gmu_hfi)
                kvfree(a6xx_state->gmu_hfi->data);
 
-       list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
-               kfree(obj);
+       if (a6xx_state->gmu_debug)
+               kvfree(a6xx_state->gmu_debug->data);
+
+       list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
+               list_del(&obj->node);
+               kvfree(obj);
+       }
 
        adreno_gpu_state_destroy(state);
        kfree(a6xx_state);
index 24b489b..6288064 100644 (file)
@@ -679,6 +679,9 @@ static int adreno_system_suspend(struct device *dev)
        struct msm_gpu *gpu = dev_to_gpu(dev);
        int remaining, ret;
 
+       if (!gpu)
+               return 0;
+
        suspend_scheduler(gpu);
 
        remaining = wait_event_timeout(gpu->retire_event,
@@ -700,7 +703,12 @@ out:
 
 static int adreno_system_resume(struct device *dev)
 {
-       resume_scheduler(dev_to_gpu(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
+
+       if (!gpu)
+               return 0;
+
+       resume_scheduler(gpu);
        return pm_runtime_force_resume(dev);
 }
 
index 382fb7f..5a0e849 100644 (file)
@@ -729,7 +729,12 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
        return buf;
 }
 
-/* len is expected to be in bytes */
+/* len is expected to be in bytes
+ *
+ * WARNING: *ptr should be allocated with kvmalloc or friends.  It can be free'd
+ * with kvfree() and replaced with a newly kvmalloc'd buffer on the first call
+ * when the unencoded raw data is encoded
+ */
 void adreno_show_object(struct drm_printer *p, void **ptr, int len,
                bool *encoded)
 {
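
The warning above is about buffer ownership: adreno_show_object() may swap the caller's buffer for a freshly encoded one, so the caller has to allocate and free with the kvmalloc() family. A hypothetical caller, kept as a sketch:

/* Hypothetical caller showing the ownership contract spelled out above. */
static void dump_buffer(struct drm_printer *p, const void *raw, int len)
{
	void *buf = kvmalloc(len, GFP_KERNEL);
	bool encoded = false;

	if (!buf)
		return;
	memcpy(buf, raw, len);
	/* May kvfree(buf) and repoint it at a freshly kvmalloc'd,
	 * ascii85-encoded copy on the first call. */
	adreno_show_object(p, &buf, len, &encoded);
	kvfree(buf);	/* so the caller must free with kvfree(), never kfree() */
}
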
index 7288041..7444b75 100644 (file)
@@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
-                                struct drm_display_mode *mode)
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
 {
        struct mdp4_lvds_connector *mdp4_lvds_connector =
                        to_mdp4_lvds_connector(connector);
index 3854c9f..dd26ca6 100644 (file)
@@ -1243,8 +1243,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
 {
        int ret = 0;
        const u8 *dpcd = ctrl->panel->dpcd;
-       u8 encoding = DP_SET_ANSI_8B10B;
-       u8 ssc;
+       u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
        u8 assr;
        struct dp_link_info link_info = {0};
 
@@ -1256,13 +1255,11 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
 
        dp_aux_link_configure(ctrl->aux, &link_info);
 
-       if (drm_dp_max_downspread(dpcd)) {
-               ssc = DP_SPREAD_AMP_0_5;
-               drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1);
-       }
+       if (drm_dp_max_downspread(dpcd))
+               encoding[0] |= DP_SPREAD_AMP_0_5;
 
-       drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
-                               &encoding, 1);
+       /* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */
+       drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2);
 
        if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
                assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
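
The rewrite above works because DP_DOWNSPREAD_CTRL (DPCD 0x107) and DP_MAIN_LINK_CHANNEL_CODING_SET (DPCD 0x108) are adjacent registers, so a single two-byte AUX write configures both. A minimal sketch, assuming aux and dpcd as in the surrounding function and omitting error handling:

/* Minimal sketch; one AUX transaction covers both consecutive DPCD registers. */
static void config_downspread_and_coding(struct drm_dp_aux *aux, const u8 *dpcd)
{
	u8 values[] = {
		0,			/* DP_DOWNSPREAD_CTRL              (DPCD 0x107) */
		DP_SET_ANSI_8B10B,	/* DP_MAIN_LINK_CHANNEL_CODING_SET (DPCD 0x108) */
	};

	if (drm_dp_max_downspread(dpcd))
		values[0] |= DP_SPREAD_AMP_0_5;

	drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, values, sizeof(values));
}
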
index bfd0aef..a49f6db 100644 (file)
@@ -1249,7 +1249,7 @@ int dp_display_request_irq(struct msm_dp *dp_display)
                return -EINVAL;
        }
 
-       rc = devm_request_irq(&dp->pdev->dev, dp->irq,
+       rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
                        dp_display_irq_handler,
                        IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
        if (rc < 0) {
@@ -1528,6 +1528,11 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
        }
 }
 
+static void of_dp_aux_depopulate_bus_void(void *data)
+{
+       of_dp_aux_depopulate_bus(data);
+}
+
 static int dp_display_get_next_bridge(struct msm_dp *dp)
 {
        int rc;
@@ -1552,10 +1557,16 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
                 * panel driver is probed asynchronously but is the best we
                 * can do without a bigger driver reorganization.
                 */
-               rc = devm_of_dp_aux_populate_ep_devices(dp_priv->aux);
+               rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
                of_node_put(aux_bus);
                if (rc)
                        goto error;
+
+               rc = devm_add_action_or_reset(dp->drm_dev->dev,
+                                               of_dp_aux_depopulate_bus_void,
+                                               dp_priv->aux);
+               if (rc)
+                       goto error;
        } else if (dp->is_edp) {
                DRM_ERROR("eDP aux_bus not found\n");
                return -ENODEV;
@@ -1568,7 +1579,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
         * For DisplayPort interfaces external bridges are optional, so
         * silently ignore an error if one is not present (-ENODEV).
         */
-       rc = dp_parser_find_next_bridge(dp_priv->parser);
+       rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
        if (!dp->is_edp && rc == -ENODEV)
                return 0;
 
@@ -1597,6 +1608,12 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
                return -EINVAL;
 
        priv = dev->dev_private;
+
+       if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+               DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+               return -ENOSPC;
+       }
+
        dp_display->drm_dev = dev;
 
        dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
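
of_dp_aux_depopulate_bus_void() exists only to adapt the typed teardown to the void (*)(void *) signature that devm_add_action_or_reset() expects; the registered action then runs automatically on unbind, or immediately if registration fails. A sketch of the general pattern, with hypothetical foo_* names:

/* General shape of the devm-action pattern; foo_* names are hypothetical. */
static void foo_release_void(void *data)
{
	foo_release(data);	/* adapt the typed cleanup to void (*)(void *) */
}

static int foo_bind(struct device *dev, struct foo *foo)
{
	int ret = foo_acquire(foo);

	if (ret)
		return ret;

	/* Runs foo_release_void(foo) on driver unbind, or right away
	 * (returning the error) if the action cannot be registered. */
	return devm_add_action_or_reset(dev, foo_release_void, foo);
}
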
index 6df25f7..6db82f9 100644 (file)
@@ -31,6 +31,36 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
                                        connector_status_disconnected;
 }
 
+static int dp_bridge_atomic_check(struct drm_bridge *bridge,
+                           struct drm_bridge_state *bridge_state,
+                           struct drm_crtc_state *crtc_state,
+                           struct drm_connector_state *conn_state)
+{
+       struct msm_dp *dp;
+
+       dp = to_dp_bridge(bridge)->dp_display;
+
+       drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
+               (dp->is_connected) ? "true" : "false");
+
+       /*
+        * There is no protection in the DRM framework to check if the display
+        * pipeline has been already disabled before trying to disable it again.
+        * Hence if the sink is unplugged, the pipeline gets disabled, but the
+        * crtc->active is still true. Any attempt to set the mode or manually
+        * disable this encoder will result in the crash.
+        *
+        * TODO: add support for telling the DRM subsystem that the pipeline is
+        * disabled by the hardware and thus all access to it should be forbidden.
+        * After that this piece of code can be removed.
+        */
+       if (bridge->ops & DRM_BRIDGE_OP_HPD)
+               return (dp->is_connected) ? 0 : -ENOTCONN;
+
+       return 0;
+}
+
+
 /**
  * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
  * @bridge: Pointer to drm bridge
@@ -61,6 +91,9 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
 }
 
 static const struct drm_bridge_funcs dp_bridge_ops = {
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
+       .atomic_reset           = drm_atomic_helper_bridge_reset,
        .enable       = dp_bridge_enable,
        .disable      = dp_bridge_disable,
        .post_disable = dp_bridge_post_disable,
@@ -68,6 +101,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
        .mode_valid   = dp_bridge_mode_valid,
        .get_modes    = dp_bridge_get_modes,
        .detect       = dp_bridge_detect,
+       .atomic_check = dp_bridge_atomic_check,
 };
 
 struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
index dd73221..dcbe893 100644 (file)
@@ -240,12 +240,12 @@ static int dp_parser_clock(struct dp_parser *parser)
        return 0;
 }
 
-int dp_parser_find_next_bridge(struct dp_parser *parser)
+int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
 {
-       struct device *dev = &parser->pdev->dev;
+       struct platform_device *pdev = parser->pdev;
        struct drm_bridge *bridge;
 
-       bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+       bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
        if (IS_ERR(bridge))
                return PTR_ERR(bridge);
 
index 866c1a8..d30ab77 100644 (file)
@@ -138,8 +138,9 @@ struct dp_parser {
 struct dp_parser *dp_parser_get(struct platform_device *pdev);
 
 /**
- * dp_parser_find_next_bridge() - find an additional bridge to DP
+ * devm_dp_parser_find_next_bridge() - find an additional bridge to DP
  *
+ * @dev: device to tie bridge lifetime to
  * @parser: dp_parser data from client
  *
  * This function is used to find any additional bridge attached to
@@ -147,6 +148,6 @@ struct dp_parser *dp_parser_get(struct platform_device *pdev);
  *
  * Return: 0 if able to get the bridge, otherwise negative errno for failure.
  */
-int dp_parser_find_next_bridge(struct dp_parser *parser);
+int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser);
 
 #endif
index 39bbabb..8a95c74 100644 (file)
@@ -218,6 +218,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                return -EINVAL;
 
        priv = dev->dev_private;
+
+       if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+               DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+               return -ENOSPC;
+       }
+
        msm_dsi->dev = dev;
 
        ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
index 93fe61b..f28fb21 100644 (file)
@@ -300,6 +300,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
        struct platform_device *pdev = hdmi->pdev;
        int ret;
 
+       if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+               DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+               return -ENOSPC;
+       }
+
        hdmi->dev = dev;
        hdmi->encoder = encoder;
 
@@ -339,7 +344,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
                goto fail;
        }
 
-       ret = devm_request_irq(&pdev->dev, hdmi->irq,
+       ret = devm_request_irq(dev->dev, hdmi->irq,
                        msm_hdmi_irq, IRQF_TRIGGER_HIGH,
                        "hdmi_isr", hdmi);
        if (ret < 0) {
index 28034c2..105b5b4 100644 (file)
@@ -247,6 +247,7 @@ static int msm_drm_uninit(struct device *dev)
 
        for (i = 0; i < priv->num_bridges; i++)
                drm_bridge_remove(priv->bridges[i]);
+       priv->num_bridges = 0;
 
        pm_runtime_get_sync(dev);
        msm_irq_uninstall(ddev);
index 5599d93..45a3e5c 100644 (file)
@@ -501,11 +501,11 @@ out:
  */
 static void submit_cleanup(struct msm_gem_submit *submit, bool error)
 {
-       unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
+       unsigned cleanup_flags = BO_LOCKED;
        unsigned i;
 
        if (error)
-               cleanup_flags |= BO_VMA_PINNED;
+               cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -706,7 +706,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_gem_submit *args = data;
        struct msm_file_private *ctx = file->driver_priv;
-       struct msm_gem_submit *submit = NULL;
+       struct msm_gem_submit *submit;
        struct msm_gpu *gpu = priv->gpu;
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
@@ -946,8 +946,7 @@ out_unlock:
                put_unused_fd(out_fence_fd);
        mutex_unlock(&queue->lock);
 out_post_unlock:
-       if (submit)
-               msm_gem_submit_put(submit);
+       msm_gem_submit_put(submit);
        if (!IS_ERR_OR_NULL(post_deps)) {
                for (i = 0; i < args->nr_out_syncobjs; ++i) {
                        kfree(post_deps[i].chain);
index 0098ee8..021f4e2 100644 (file)
@@ -997,4 +997,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
        }
 
        msm_devfreq_cleanup(gpu);
+
+       platform_set_drvdata(gpu->pdev, NULL);
 }
index ff911e7..58a72e6 100644 (file)
@@ -280,6 +280,10 @@ struct msm_gpu {
 static inline struct msm_gpu *dev_to_gpu(struct device *dev)
 {
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+
+       if (!adreno_smmu)
+               return NULL;
+
        return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
 }
 
index cad4c35..57a8e95 100644 (file)
@@ -25,7 +25,8 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
                msm_gem_lock(obj);
                msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
-               submit->bos[i].flags &= ~BO_VMA_PINNED;
+               msm_gem_unpin_locked(obj);
+               submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
                msm_gem_unlock(obj);
        }
 
index 5fe2091..20fe538 100644 (file)
@@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
                .src            = &src,
                .dst            = &dst,
                .pgmap_owner    = drm->dev,
+               .fault_page     = vmf->page,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };
 
index 89056a1..6bd0634 100644 (file)
@@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
 {
        struct panfrost_dump_object_header *hdr = iter->hdr;
 
-       hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
-       hdr->type = cpu_to_le32(type);
-       hdr->file_offset = cpu_to_le32(iter->data - iter->start);
-       hdr->file_size = cpu_to_le32(data_end - iter->data);
+       hdr->magic = PANFROSTDUMP_MAGIC;
+       hdr->type = type;
+       hdr->file_offset = iter->data - iter->start;
+       hdr->file_size = data_end - iter->data;
 
        iter->hdr++;
-       iter->data += le32_to_cpu(hdr->file_size);
+       iter->data += hdr->file_size;
 }
 
 static void
@@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
 
                reg = panfrost_dump_registers[i] + js_as_offset;
 
-               dumpreg->reg = cpu_to_le32(reg);
-               dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+               dumpreg->reg = reg;
+               dumpreg->value = gpu_read(pfdev, reg);
        }
 
        panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
@@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job)
        struct panfrost_dump_iterator iter;
        struct drm_gem_object *dbo;
        unsigned int n_obj, n_bomap_pages;
-       __le64 *bomap, *bomap_start;
+       u64 *bomap, *bomap_start;
        size_t file_size;
        u32 as_nr;
        int slot;
@@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job)
         * For now, we write the job identifier in the register dump header,
         * so that we can decode the entire dump later with pandecode
         */
-       iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
-       iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
-       iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
-       iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
-       iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+       iter.hdr->reghdr.jc = job->jc;
+       iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
+       iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
+       iter.hdr->reghdr.gpu_id = pfdev->features.id;
+       iter.hdr->reghdr.nbos = job->bo_count;
 
        panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
 
@@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job)
 
                WARN_ON(!mapping->active);
 
-               iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+               iter.hdr->bomap.data[0] = bomap - bomap_start;
 
                for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
                        struct page *page = sg_page_iter_page(&page_iter);
 
                        if (!IS_ERR(page)) {
-                               *bomap++ = cpu_to_le64(page_to_phys(page));
+                               *bomap++ = page_to_phys(page);
                        } else {
                                dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
-                               *bomap++ = ~cpu_to_le64(0);
+                               *bomap++ = 0;
                        }
                }
 
-               iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+               iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
 
                vaddr = map.vaddr;
                memcpy(iter.data, vaddr, bo->base.base.size);
 
                drm_gem_shmem_vunmap(&bo->base, &map);
 
-               iter.hdr->bomap.valid = cpu_to_le32(1);
+               iter.hdr->bomap.valid = 1;
 
 dump_header:   panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
                                          bo->base.base.size);
index 6b25b2f..4b913db 100644 (file)
@@ -207,6 +207,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
 
+       dma_fence_put(f);
        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
 }
@@ -234,8 +235,10 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
                struct drm_sched_fence *s_fence = job->s_fence;
 
                /* Wait for all dependencies to avoid data corruptions */
-               while ((f = drm_sched_job_dependency(job, entity)))
+               while ((f = drm_sched_job_dependency(job, entity))) {
                        dma_fence_wait(f, false);
+                       dma_fence_put(f);
+               }
 
                drm_sched_fence_scheduled(s_fence);
                dma_fence_set_error(&s_fence->finished, -ESRCH);
@@ -250,6 +253,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
                        continue;
                }
 
+               dma_fence_get(entity->last_scheduled);
                r = dma_fence_add_callback(entity->last_scheduled,
                                           &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb);
@@ -385,7 +389,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
        }
 
        s_fence = to_drm_sched_fence(fence);
-       if (s_fence && s_fence->sched == sched) {
+       if (s_fence && s_fence->sched == sched &&
+           !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
 
                /*
                 * Fence is from the same scheduler, only need to wait for
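
The scheduler fixes above all enforce the same ownership rule: a fence handed to dma_fence_add_callback() needs its own reference, released in the callback, and every fence returned by drm_sched_job_dependency() must be put after dma_fence_wait(). A minimal sketch of the callback half, with hypothetical names:

/* Hedged sketch of the refcount rule; not the scheduler's exact code. */
static void jobs_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	dma_fence_put(f);		/* balance the get taken below */
}

static void watch_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	dma_fence_get(fence);		/* reference now owned by the callback */
	if (dma_fence_add_callback(fence, cb, jobs_done_cb))
		dma_fence_put(fence);	/* already signalled: callback will not run */
}
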
index 8d86c25..2191e57 100644 (file)
@@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
        iosys_map_set_vaddr(&src, xrgb8888);
 
        drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
-       buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE);
+       buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
        KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
 }
 
index ffbbb45..2027063 100644 (file)
@@ -490,6 +490,7 @@ module_init(vc4_drm_register);
 module_exit(vc4_drm_unregister);
 
 MODULE_ALIAS("platform:vc4-drm");
+MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
 MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
 MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
 MODULE_LICENSE("GPL v2");
index 64f9fea..596e311 100644 (file)
@@ -3318,12 +3318,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
        unsigned long __maybe_unused flags;
        u32 __maybe_unused value;
+       unsigned long rate;
        int ret;
 
+       /*
+        * The HSM clock is in the HDMI power domain, so we need to set
+        * its frequency while the power domain is active so that it
+        * keeps its rate.
+        */
+       ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+       if (ret)
+               return ret;
+
        ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
        if (ret)
                return ret;
 
+       /*
+        * Whenever the Raspberry Pi boots without an HDMI monitor
+        * plugged in, the firmware won't have initialized the HSM clock
+        * rate and it will be reported as 0.
+        *
+        * If we try to access a register of the controller in such a
+        * case, it will lead to a silent CPU stall. Let's make sure we
+        * prevent such a case.
+        */
+       rate = clk_get_rate(vc4_hdmi->hsm_clock);
+       if (!rate) {
+               ret = -EINVAL;
+               goto err_disable_clk;
+       }
+
        if (vc4_hdmi->variant->reset)
                vc4_hdmi->variant->reset(vc4_hdmi);
 
@@ -3345,6 +3370,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
 #endif
 
        return 0;
+
+err_disable_clk:
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
+       return ret;
 }
 
 static void vc4_hdmi_put_ddc_device(void *ptr)
index da86565..dad953f 100644 (file)
 #define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
 #define USB_DEVICE_ID_MADCATZ_RAT5     0x1705
 #define USB_DEVICE_ID_MADCATZ_RAT9     0x1709
+#define USB_DEVICE_ID_MADCATZ_MMO7  0x1713
 
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2    0x09cc
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE       0x0ba0
 #define USB_DEVICE_ID_SONY_PS5_CONTROLLER      0x0ce6
+#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2    0x0df2
 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER   0x03d5
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER       0x042f
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER             0x0002
index 9dabd63..44763c0 100644 (file)
@@ -985,7 +985,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
        struct device *dev = led_cdev->dev->parent;
        struct hid_device *hdev = to_hid_device(dev);
        struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
-       u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
+       static const u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
        int led_nr = 0;
        int ret = 0;
 
index 664a624..c9c968d 100644 (file)
@@ -480,7 +480,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                magicmouse_raw_event(hdev, report, data + 2, data[1]);
                magicmouse_raw_event(hdev, report, data + 2 + data[1],
                        size - 2 - data[1]);
-               break;
+               return 0;
        default:
                return 0;
        }
index 40050eb..0b58763 100644 (file)
@@ -46,6 +46,7 @@ struct ps_device {
        uint32_t fw_version;
 
        int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size);
+       void (*remove)(struct ps_device *dev);
 };
 
 /* Calibration data for playstation motion sensors. */
@@ -107,6 +108,9 @@ struct ps_led_info {
 #define DS_STATUS_CHARGING             GENMASK(7, 4)
 #define DS_STATUS_CHARGING_SHIFT       4
 
+/* Feature version from DualSense Firmware Info report. */
+#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff))
+
 /*
  * Status of a DualSense touch point contact.
  * Contact IDs, with highest bit set are 'inactive'
@@ -125,6 +129,7 @@ struct ps_led_info {
 #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
 #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
 #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2)
 #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
 #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
 
@@ -142,6 +147,9 @@ struct dualsense {
        struct input_dev *sensors;
        struct input_dev *touchpad;
 
+       /* Update version is used as a feature/capability version. */
+       uint16_t update_version;
+
        /* Calibration data for accelerometer and gyroscope. */
        struct ps_calibration_data accel_calib_data[3];
        struct ps_calibration_data gyro_calib_data[3];
@@ -152,6 +160,7 @@ struct dualsense {
        uint32_t sensor_timestamp_us;
 
        /* Compatible rumble state */
+       bool use_vibration_v2;
        bool update_rumble;
        uint8_t motor_left;
        uint8_t motor_right;
@@ -174,6 +183,7 @@ struct dualsense {
        struct led_classdev player_leds[5];
 
        struct work_struct output_worker;
+       bool output_worker_initialized;
        void *output_report_dmabuf;
        uint8_t output_seq; /* Sequence number for output report. */
 };
@@ -299,6 +309,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = {
        {0, 0},
 };
 
+static inline void dualsense_schedule_work(struct dualsense *ds);
 static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue);
 
 /*
@@ -789,6 +800,7 @@ err_free:
        return ret;
 }
 
+
 static int dualsense_get_firmware_info(struct dualsense *ds)
 {
        uint8_t *buf;
@@ -808,6 +820,15 @@ static int dualsense_get_firmware_info(struct dualsense *ds)
        ds->base.hw_version = get_unaligned_le32(&buf[24]);
        ds->base.fw_version = get_unaligned_le32(&buf[28]);
 
+       /* Update version is some kind of feature version. It is distinct from
+        * the firmware version as there can be many different variations of a
+        * controller over time with the same physical shell, but with different
+        * PCBs and other internal changes. The update version (internal name) is
+        * used as a means to detect what features are available and change behavior.
+        * Note: the version is different between DualSense and DualSense Edge.
+        */
+       ds->update_version = get_unaligned_le16(&buf[44]);
+
 err_free:
        kfree(buf);
        return ret;
@@ -878,7 +899,7 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le
        ds->update_player_leds = true;
        spin_unlock_irqrestore(&ds->base.lock, flags);
 
-       schedule_work(&ds->output_worker);
+       dualsense_schedule_work(ds);
 
        return 0;
 }
@@ -922,6 +943,16 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_
        }
 }
 
+static inline void dualsense_schedule_work(struct dualsense *ds)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ds->base.lock, flags);
+       if (ds->output_worker_initialized)
+               schedule_work(&ds->output_worker);
+       spin_unlock_irqrestore(&ds->base.lock, flags);
+}
+
 /*
  * Helper function to send DualSense output reports. Applies a CRC at the end of a report
  * for Bluetooth reports.
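
dualsense_schedule_work() pairs with dualsense_remove() further down: the remove path clears the flag under the same spinlock before cancel_work_sync(), so nothing can re-queue the worker once teardown has started. The general shape of the guard, with hypothetical names:

/* Sketch of the "guarded schedule_work" pattern; names are illustrative. */
struct dev_state {
	spinlock_t lock;
	bool worker_alive;
	struct work_struct worker;
};

static void dev_schedule_work(struct dev_state *ds)
{
	unsigned long flags;

	spin_lock_irqsave(&ds->lock, flags);
	if (ds->worker_alive)			/* refuse once teardown has begun */
		schedule_work(&ds->worker);
	spin_unlock_irqrestore(&ds->lock, flags);
}

static void dev_teardown(struct dev_state *ds)
{
	unsigned long flags;

	spin_lock_irqsave(&ds->lock, flags);
	ds->worker_alive = false;		/* no new queueing after this point */
	spin_unlock_irqrestore(&ds->lock, flags);

	cancel_work_sync(&ds->worker);		/* wait out anything already queued */
}
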
@@ -960,7 +991,10 @@ static void dualsense_output_worker(struct work_struct *work)
        if (ds->update_rumble) {
                /* Select classic rumble style haptics and enable it. */
                common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
-               common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
+               if (ds->use_vibration_v2)
+                       common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2;
+               else
+                       common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
                common->motor_left = ds->motor_left;
                common->motor_right = ds->motor_right;
                ds->update_rumble = false;
@@ -1082,7 +1116,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
                spin_unlock_irqrestore(&ps_dev->lock, flags);
 
                /* Schedule updating of microphone state at hardware level. */
-               schedule_work(&ds->output_worker);
+               dualsense_schedule_work(ds);
        }
        ds->last_btn_mic_state = btn_mic_state;
 
@@ -1197,10 +1231,22 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef
        ds->motor_right = effect->u.rumble.weak_magnitude / 256;
        spin_unlock_irqrestore(&ds->base.lock, flags);
 
-       schedule_work(&ds->output_worker);
+       dualsense_schedule_work(ds);
        return 0;
 }
 
+static void dualsense_remove(struct ps_device *ps_dev)
+{
+       struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ds->base.lock, flags);
+       ds->output_worker_initialized = false;
+       spin_unlock_irqrestore(&ds->base.lock, flags);
+
+       cancel_work_sync(&ds->output_worker);
+}
+
 static int dualsense_reset_leds(struct dualsense *ds)
 {
        struct dualsense_output_report report;
@@ -1237,7 +1283,7 @@ static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t gr
        ds->lightbar_blue = blue;
        spin_unlock_irqrestore(&ds->base.lock, flags);
 
-       schedule_work(&ds->output_worker);
+       dualsense_schedule_work(ds);
 }
 
 static void dualsense_set_player_leds(struct dualsense *ds)
@@ -1260,7 +1306,7 @@ static void dualsense_set_player_leds(struct dualsense *ds)
 
        ds->update_player_leds = true;
        ds->player_leds_state = player_ids[player_id];
-       schedule_work(&ds->output_worker);
+       dualsense_schedule_work(ds);
 }
 
 static struct ps_device *dualsense_create(struct hid_device *hdev)
@@ -1299,7 +1345,9 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
        ps_dev->battery_capacity = 100; /* initial value until parse_report. */
        ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
        ps_dev->parse_report = dualsense_parse_report;
+       ps_dev->remove = dualsense_remove;
        INIT_WORK(&ds->output_worker, dualsense_output_worker);
+       ds->output_worker_initialized = true;
        hid_set_drvdata(hdev, ds);
 
        max_output_report_size = sizeof(struct dualsense_output_report_bt);
@@ -1320,6 +1368,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
                return ERR_PTR(ret);
        }
 
+       /* The original DualSense firmware simulated classic controller rumble through
+        * its new haptics hardware, which felt different from the classic rumble users
+        * were used to. Newer firmware was later introduced to change this behavior and
+        * make the new 'v2' behavior the default on PlayStation and other platforms.
+        * The original DualSense requires firmware at least as new as that bundled with
+        * PS5 software released in 2021. DualSense Edge supports it out of the box.
+        * Both devices also support the old mode, but it is not really used.
+        */
+       if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) {
+               /* Feature version 2.21 introduced new vibration method. */
+               ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21);
+       } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) {
+               ds->use_vibration_v2 = true;
+       }
+
        ret = ps_devices_list_add(ps_dev);
        if (ret)
                return ERR_PTR(ret);
@@ -1436,7 +1499,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_stop;
        }
 
-       if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) {
+       if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER ||
+               hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) {
                dev = dualsense_create(hdev);
                if (IS_ERR(dev)) {
                        hid_err(hdev, "Failed to create dualsense.\n");
@@ -1461,6 +1525,9 @@ static void ps_remove(struct hid_device *hdev)
        ps_devices_list_remove(dev);
        ps_device_release_player_id(dev);
 
+       if (dev->remove)
+               dev->remove(dev);
+
        hid_hw_close(hdev);
        hid_hw_stop(hdev);
 }
@@ -1468,6 +1535,8 @@ static void ps_remove(struct hid_device *hdev)
 static const struct hid_device_id ps_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ps_devices);
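
For reference, the update_version gate added above packs major.minor into a single 16-bit value, so an ordinary integer comparison gives version ordering. A small illustrative helper, assuming buf is the firmware-info feature report read earlier:

/* Worked example of the packing; buf is assumed to be the firmware-info payload. */
#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff))

static bool wants_vibration_v2(const u8 *buf)
{
	u16 update_version = get_unaligned_le16(&buf[44]);

	/* DS_FEATURE_VERSION(2, 21) == (2 << 8) | 21 == 0x0215 == 533 */
	return update_version >= DS_FEATURE_VERSION(2, 21);
}

The >= comparison is safe because the major number sits in the high byte, so packed values sort lexicographically as long as the minor number stays below 256.
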
index 70f602c..50e1c71 100644 (file)
@@ -620,6 +620,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
 #endif
 #if IS_ENABLED(CONFIG_HID_SAMSUNG)
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
index c7bf14c..b84e975 100644 (file)
@@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = {
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
                .driver_data = SAITEK_RELEASE_MODE_MMO7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
+               .driver_data = SAITEK_RELEASE_MODE_MMO7 },
        { }
 };
 
index ccf0af5..8bf32c6 100644 (file)
@@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS            (MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA          (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#define TO_CORE_ID(cpu)                (cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
 #ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu) \
        for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@ struct temp_data {
 struct platform_data {
        struct device           *hwmon_dev;
        u16                     pkg_id;
+       u16                     cpu_map[NUM_REAL_CORES];
+       struct ida              ida;
        struct cpumask          cpumask;
        struct temp_data        *core_data[MAX_CORE_DATA];
        struct device_attribute name_attr;
@@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
                                                        MSR_IA32_THERM_STATUS;
        tdata->is_pkg_data = pkg_flag;
        tdata->cpu = cpu;
-       tdata->cpu_core_id = TO_CORE_ID(cpu);
+       tdata->cpu_core_id = topology_core_id(cpu);
        tdata->attr_size = MAX_CORE_ATTRS;
        mutex_init(&tdata->update_lock);
        return tdata;
@@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
        struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
-       int err, attr_no;
+       int err, index, attr_no;
 
        /*
         * Find attr number for sysfs:
@@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
         * The attr number is always core id + 2
         * The Pkgtemp will always show up as temp1_*, if available
         */
-       attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+       if (pkg_flag) {
+               attr_no = PKG_SYSFS_ATTR_NO;
+       } else {
+               index = ida_alloc(&pdata->ida, GFP_KERNEL);
+               if (index < 0)
+                       return index;
+               pdata->cpu_map[index] = topology_core_id(cpu);
+               attr_no = index + BASE_SYSFS_ATTR_NO;
+       }
 
-       if (attr_no > MAX_CORE_DATA - 1)
-               return -ERANGE;
+       if (attr_no > MAX_CORE_DATA - 1) {
+               err = -ERANGE;
+               goto ida_free;
+       }
 
        tdata = init_temp_data(cpu, pkg_flag);
-       if (!tdata)
-               return -ENOMEM;
+       if (!tdata) {
+               err = -ENOMEM;
+               goto ida_free;
+       }
 
        /* Test if we can access the status register */
        err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 exit_free:
        pdata->core_data[attr_no] = NULL;
        kfree(tdata);
+ida_free:
+       if (!pkg_flag)
+               ida_free(&pdata->ida, index);
        return err;
 }
 
@@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
 
        kfree(pdata->core_data[indx]);
        pdata->core_data[indx] = NULL;
+
+       if (indx >= BASE_SYSFS_ATTR_NO)
+               ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
 }
 
 static int coretemp_probe(struct platform_device *pdev)
@@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pdata->pkg_id = pdev->id;
+       ida_init(&pdata->ida);
        platform_set_drvdata(pdev, pdata);
 
        pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev)
                if (pdata->core_data[i])
                        coretemp_remove_core(pdata, i);
 
+       ida_destroy(&pdata->ida);
        return 0;
 }
 
@@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct platform_data *pd;
        struct temp_data *tdata;
-       int indx, target;
+       int i, indx = -1, target;
 
        /*
         * Don't execute this on suspend as the device remove locks
@@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu)
        if (!pdev)
                return 0;
 
-       /* The core id is too big, just return */
-       indx = TO_ATTR_NO(cpu);
-       if (indx > MAX_CORE_DATA - 1)
+       pd = platform_get_drvdata(pdev);
+
+       for (i = 0; i < NUM_REAL_CORES; i++) {
+               if (pd->cpu_map[i] == topology_core_id(cpu)) {
+                       indx = i + BASE_SYSFS_ATTR_NO;
+                       break;
+               }
+       }
+
+       /* Too many cores and this core is not populated, just return */
+       if (indx < 0)
                return 0;
 
-       pd = platform_get_drvdata(pdev);
        tdata = pd->core_data[indx];
 
        cpumask_clear_cpu(cpu, &pd->cpumask);
index 345d883..2210aa6 100644 (file)
@@ -820,7 +820,8 @@ static const struct hid_device_id corsairpsu_idtable[] = {
        { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
        { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
        { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
-       { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsaur HX1000i revision 2 */
+       { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i revision 2 */
+       { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i */
        { },
 };
 MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
index dc3d9a2..83a347c 100644 (file)
@@ -257,7 +257,10 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
 
        if (val == 0) {
                /* Disable pwm-fan unconditionally */
-               ret = __set_pwm(ctx, 0);
+               if (ctx->enabled)
+                       ret = __set_pwm(ctx, 0);
+               else
+                       ret = pwm_fan_switch_power(ctx, false);
                if (ret)
                        ctx->enable_mode = old_val;
                pwm_fan_update_state(ctx, 0);
index d5dbc67..f306817 100644 (file)
@@ -1687,14 +1687,15 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
                ret = coresight_fixup_device_conns(csdev);
        if (!ret)
                ret = coresight_fixup_orphan_conns(csdev);
-       if (!ret && cti_assoc_ops && cti_assoc_ops->add)
-               cti_assoc_ops->add(csdev);
 
 out_unlock:
        mutex_unlock(&coresight_mutex);
        /* Success */
-       if (!ret)
+       if (!ret) {
+               if (cti_assoc_ops && cti_assoc_ops->add)
+                       cti_assoc_ops->add(csdev);
                return csdev;
+       }
 
        /* Unregister the device if needed */
        if (registered) {
index 8988b2e..c6e8c65 100644 (file)
@@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
 static int cti_enable_hw(struct cti_drvdata *drvdata)
 {
        struct cti_config *config = &drvdata->config;
-       struct device *dev = &drvdata->csdev->dev;
        unsigned long flags;
        int rc = 0;
 
-       pm_runtime_get_sync(dev->parent);
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        /* no need to do anything if enabled or unpowered*/
@@ -119,7 +117,6 @@ cti_state_unchanged:
        /* cannot enable due to error */
 cti_err_not_enabled:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(dev->parent);
        return rc;
 }
 
@@ -153,7 +150,6 @@ cti_hp_not_enabled:
 static int cti_disable_hw(struct cti_drvdata *drvdata)
 {
        struct cti_config *config = &drvdata->config;
-       struct device *dev = &drvdata->csdev->dev;
        struct coresight_device *csdev = drvdata->csdev;
 
        spin_lock(&drvdata->spinlock);
@@ -175,7 +171,6 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
        coresight_disclaim_device_unlocked(csdev);
        CS_LOCK(drvdata->base);
        spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(dev->parent);
        return 0;
 
        /* not disabled this call */
@@ -541,7 +536,7 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
 /*
  * Search the cti list to add an associated CTI into the supplied CS device
  * This will set the association if CTI declared before the CS device.
- * (called from coresight_register() with coresight_mutex locked).
+ * (called from coresight_register() without coresight_mutex locked).
  */
 static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
 {
@@ -569,7 +564,8 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
                         * if we found a matching csdev then update the ECT
                         * association pointer for the device with this CTI.
                         */
-                       csdev->ect_dev = ect_item->csdev;
+                       coresight_set_assoc_ectdev_mutex(csdev->ect_dev,
+                                                        ect_item->csdev);
                        break;
                }
        }
index 264e780..e50f960 100644 (file)
@@ -764,6 +764,7 @@ config I2C_LPC2K
 config I2C_MLXBF
         tristate "Mellanox BlueField I2C controller"
         depends on MELLANOX_PLATFORM && ARM64
+       depends on ACPI
        select I2C_SLAVE
         help
           Enabling this option will add I2C SMBus support for Mellanox BlueField
index e68e775..1810d57 100644 (file)
@@ -2247,7 +2247,6 @@ static struct i2c_adapter_quirks mlxbf_i2c_quirks = {
        .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH,
 };
 
-#ifdef CONFIG_ACPI
 static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
        { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
        { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
@@ -2282,12 +2281,6 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
 
        return 0;
 }
-#else
-static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
-{
-       return -ENOENT;
-}
-#endif /* CONFIG_ACPI */
 
 static int mlxbf_i2c_probe(struct platform_device *pdev)
 {
@@ -2490,9 +2483,7 @@ static struct platform_driver mlxbf_i2c_driver = {
        .remove = mlxbf_i2c_remove,
        .driver = {
                .name = "i2c-mlxbf",
-#ifdef CONFIG_ACPI
                .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
-#endif /* CONFIG_ACPI  */
        },
 };
 
index 72fcfb1..081f51e 100644 (file)
@@ -40,7 +40,7 @@
 #define MLXCPLD_LPCI2C_STATUS_REG      0x9
 #define MLXCPLD_LPCI2C_DATA_REG                0xa
 
-/* LPC I2C masks and parametres */
+/* LPC I2C masks and parameters */
 #define MLXCPLD_LPCI2C_RST_SEL_MASK    0x1
 #define MLXCPLD_LPCI2C_TRANS_END       0x1
 #define MLXCPLD_LPCI2C_STATUS_NACK     0x10
index 87739fb..a4b97fe 100644 (file)
@@ -639,6 +639,11 @@ static int cci_probe(struct platform_device *pdev)
        if (ret < 0)
                goto error;
 
+       pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
        for (i = 0; i < cci->data->num_masters; i++) {
                if (!cci->master[i].cci)
                        continue;
@@ -650,14 +655,12 @@ static int cci_probe(struct platform_device *pdev)
                }
        }
 
-       pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
-       pm_runtime_use_autosuspend(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-
        return 0;
 
 error_i2c:
+       pm_runtime_disable(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+
        for (--i ; i >= 0; i--) {
                if (cci->master[i].cci) {
                        i2c_del_adapter(&cci->master[i].adap);
index cfb8e04..87d5625 100644 (file)
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(high_clock,
 module_param(force, bool, 0);
 MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");
 
-/* SMBus base adress */
+/* SMBus base address */
 static unsigned short smbus_base;
 
 /* supported chips */
index b3fe6b2..277a024 100644 (file)
@@ -920,6 +920,7 @@ static struct platform_driver xiic_i2c_driver = {
 
 module_platform_driver(xiic_i2c_driver);
 
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("info@mocean-labs.com");
 MODULE_DESCRIPTION("Xilinx I2C bus driver");
 MODULE_LICENSE("GPL v2");
index 47feb37..7c7d780 100644 (file)
@@ -1185,17 +1185,30 @@ static ssize_t adxl367_get_fifo_watermark(struct device *dev,
        return sysfs_emit(buf, "%d\n", fifo_watermark);
 }
 
-static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
-static IIO_CONST_ATTR(hwfifo_watermark_max,
-                     __stringify(ADXL367_FIFO_MAX_WATERMARK));
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", "1");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", __stringify(ADXL367_FIFO_MAX_WATERMARK));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
                       adxl367_get_fifo_watermark, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
                       adxl367_get_fifo_enabled, NULL, 0);
 
 static const struct attribute *adxl367_fifo_attributes[] = {
-       &iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-       &iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
        &iio_dev_attr_hwfifo_watermark.dev_attr.attr,
        &iio_dev_attr_hwfifo_enabled.dev_attr.attr,
        NULL,
index e3ecbae..bc53af8 100644 (file)
@@ -998,17 +998,30 @@ static ssize_t adxl372_get_fifo_watermark(struct device *dev,
        return sprintf(buf, "%d\n", st->watermark);
 }
 
-static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
-static IIO_CONST_ATTR(hwfifo_watermark_max,
-                     __stringify(ADXL372_FIFO_SIZE));
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", "1");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", __stringify(ADXL372_FIFO_SIZE));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
                       adxl372_get_fifo_watermark, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
                       adxl372_get_fifo_enabled, NULL, 0);
 
 static const struct attribute *adxl372_fifo_attributes[] = {
-       &iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-       &iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
        &iio_dev_attr_hwfifo_watermark.dev_attr.attr,
        &iio_dev_attr_hwfifo_enabled.dev_attr.attr,
        NULL,
index 57e8a83..92f8b13 100644 (file)
@@ -925,17 +925,30 @@ static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
        { }
 };
 
-static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
-static IIO_CONST_ATTR(hwfifo_watermark_max,
-                     __stringify(BMC150_ACCEL_FIFO_LENGTH));
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", "1");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", __stringify(BMC150_ACCEL_FIFO_LENGTH));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
                       bmc150_accel_get_fifo_state, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
                       bmc150_accel_get_fifo_watermark, NULL, 0);
 
 static const struct attribute *bmc150_accel_fifo_attributes[] = {
-       &iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-       &iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
        &iio_dev_attr_hwfifo_watermark.dev_attr.attr,
        &iio_dev_attr_hwfifo_enabled.dev_attr.attr,
        NULL,
index 4294d65..33e2515 100644 (file)
@@ -2193,17 +2193,30 @@ static ssize_t at91_adc_get_watermark(struct device *dev,
        return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
 }
 
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", "2");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       return sysfs_emit(buf, "%s\n", AT91_HWFIFO_MAX_SIZE_STR);
+}
+
 static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
                       at91_adc_get_fifo_state, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
                       at91_adc_get_watermark, NULL, 0);
-
-static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
-static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 
 static const struct attribute *at91_adc_fifo_attributes[] = {
-       &iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-       &iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+       &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
        &iio_dev_attr_hwfifo_watermark.dev_attr.attr,
        &iio_dev_attr_hwfifo_enabled.dev_attr.attr,
        NULL,
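The four hwfifo hunks above (adxl367, adxl372, bmc150-accel and the at91 ADC) all make the same conversion: the shared IIO_CONST_ATTR() watermark attributes become per-driver show callbacks registered with IIO_DEVICE_ATTR_RO(), so the attribute is owned by the device rather than a global constant. A minimal sketch of the resulting shape, using a hypothetical FOO_FIFO_MAX_WATERMARK limit, is:

	/* Illustrative only -- not taken from the diff; FOO_FIFO_MAX_WATERMARK is hypothetical. */
	static ssize_t hwfifo_watermark_max_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
	{
		return sysfs_emit(buf, "%s\n", __stringify(FOO_FIFO_MAX_WATERMARK));
	}
	static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
	/* the attribute array then lists &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr */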
index b35fd2c..76b334f 100644 (file)
@@ -55,8 +55,9 @@
 /* Internal voltage reference in mV */
 #define MCP3911_INT_VREF_MV            1200
 
-#define MCP3911_REG_READ(reg, id)      ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
-#define MCP3911_REG_WRITE(reg, id)     ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
+#define MCP3911_REG_READ(reg, id)      ((((reg) << 1) | ((id) << 6) | (1 << 0)) & 0xff)
+#define MCP3911_REG_WRITE(reg, id)     ((((reg) << 1) | ((id) << 6) | (0 << 0)) & 0xff)
+#define MCP3911_REG_MASK               GENMASK(4, 1)
 
 #define MCP3911_NUM_CHANNELS           2
 
@@ -89,8 +90,8 @@ static int mcp3911_read(struct mcp3911 *adc, u8 reg, u32 *val, u8 len)
 
        be32_to_cpus(val);
        *val >>= ((4 - len) * 8);
-       dev_dbg(&adc->spi->dev, "reading 0x%x from register 0x%x\n", *val,
-               reg >> 1);
+       dev_dbg(&adc->spi->dev, "reading 0x%x from register 0x%lx\n", *val,
+               FIELD_GET(MCP3911_REG_MASK, reg));
        return ret;
 }
 
@@ -248,7 +249,7 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
                break;
 
        case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
-               for (int i = 0; i < sizeof(mcp3911_osr_table); i++) {
+               for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
                        if (val == mcp3911_osr_table[i]) {
                                val = FIELD_PREP(MCP3911_CONFIG_OSR, i);
                                ret = mcp3911_update(adc, MCP3911_REG_CONFIG, MCP3911_CONFIG_OSR,
@@ -496,7 +497,7 @@ static int mcp3911_probe(struct spi_device *spi)
                                indio_dev->name,
                                iio_device_id(indio_dev));
                if (!adc->trig)
-                       return PTR_ERR(adc->trig);
+                       return -ENOMEM;
 
                adc->trig->ops = &mcp3911_trigger_ops;
                iio_trigger_set_drvdata(adc->trig, adc);
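Two of the mcp3911 fixes above are easy to miss: the oversampling-ratio loop bound changes from sizeof() to ARRAY_SIZE(), and the failed devm_iio_trigger_alloc() path now returns -ENOMEM instead of PTR_ERR() on a NULL pointer (PTR_ERR(NULL) is 0, i.e. it would have reported success). The loop-bound pitfall in isolation, with a hypothetical table:

	/* Illustrative only; the table is hypothetical. */
	static const u32 osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };

	/* sizeof(osr_table)     == 32 bytes    -> "i < sizeof(...)" over-runs the 8 entries */
	/* ARRAY_SIZE(osr_table) ==  8 elements -> "i < ARRAY_SIZE(...)" visits each entry once */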
index 6256977..3cda529 100644 (file)
@@ -2086,18 +2086,19 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
                stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
                                        vin[1], scan_index, differential);
 
+               val = 0;
                ret = fwnode_property_read_u32(child, "st,min-sample-time-ns", &val);
                /* st,min-sample-time-ns is optional */
-               if (!ret) {
-                       stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
-                       if (differential)
-                               stm32_adc_smpr_init(adc, vin[1], val);
-               } else if (ret != -EINVAL) {
+               if (ret && ret != -EINVAL) {
                        dev_err(&indio_dev->dev, "Invalid st,min-sample-time-ns property %d\n",
                                ret);
                        goto err;
                }
 
+               stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
+               if (differential)
+                       stm32_adc_smpr_init(adc, vin[1], val);
+
                scan_index++;
        }
 
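The stm32-adc change above turns the optional "st,min-sample-time-ns" handling into a default-and-fall-through: val is zeroed first, a missing property (-EINVAL) is ignored, any other error still aborts, and the sampling-time registers are programmed unconditionally. The same pattern in isolation, with hypothetical names:

	/* Illustrative only; property name and helper are hypothetical. */
	u32 val = 0;	/* default used when the optional property is absent */

	ret = fwnode_property_read_u32(node, "vendor,example-ns", &val);
	if (ret && ret != -EINVAL)	/* -EINVAL here just means "not present" */
		return ret;		/* anything else is a real failure */

	apply_sample_time(chan, val);	/* hypothetical helper; now always called */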
index 0a2ca1a..7bcb5c7 100644 (file)
@@ -858,7 +858,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
                                         TSL2583_POWER_OFF_DELAY_MS);
        pm_runtime_use_autosuspend(&clientp->dev);
 
-       ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+       ret = iio_device_register(indio_dev);
        if (ret) {
                dev_err(&clientp->dev, "%s: iio registration failed\n",
                        __func__);
index b652d2b..a60ccf1 100644 (file)
@@ -1385,13 +1385,6 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
                return ret;
        }
 
-       st->iio_chan = devm_kzalloc(&st->spi->dev,
-                                   st->iio_channels * sizeof(*st->iio_chan),
-                                   GFP_KERNEL);
-
-       if (!st->iio_chan)
-               return -ENOMEM;
-
        ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
                                 LTC2983_NOTCH_FREQ_MASK,
                                 LTC2983_NOTCH_FREQ(st->filter_notch_freq));
@@ -1514,6 +1507,12 @@ static int ltc2983_probe(struct spi_device *spi)
                gpiod_set_value_cansleep(gpio, 0);
        }
 
+       st->iio_chan = devm_kzalloc(&spi->dev,
+                                   st->iio_channels * sizeof(*st->iio_chan),
+                                   GFP_KERNEL);
+       if (!st->iio_chan)
+               return -ENOMEM;
+
        ret = ltc2983_setup(st, true);
        if (ret)
                return ret;
index 65856e4..d3b39d0 100644 (file)
@@ -2330,7 +2330,8 @@ static void amd_iommu_get_resv_regions(struct device *dev,
                        type = IOMMU_RESV_RESERVED;
 
                region = iommu_alloc_resv_region(entry->address_start,
-                                                length, prot, type);
+                                                length, prot, type,
+                                                GFP_KERNEL);
                if (!region) {
                        dev_err(dev, "Out of memory allocating dm-regions\n");
                        return;
@@ -2340,14 +2341,14 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 
        region = iommu_alloc_resv_region(MSI_RANGE_START,
                                         MSI_RANGE_END - MSI_RANGE_START + 1,
-                                        0, IOMMU_RESV_MSI);
+                                        0, IOMMU_RESV_MSI, GFP_KERNEL);
        if (!region)
                return;
        list_add_tail(&region->list, head);
 
        region = iommu_alloc_resv_region(HT_RANGE_START,
                                         HT_RANGE_END - HT_RANGE_START + 1,
-                                        0, IOMMU_RESV_RESERVED);
+                                        0, IOMMU_RESV_RESERVED, GFP_KERNEL);
        if (!region)
                return;
        list_add_tail(&region->list, head);
index 4526575..4f4a323 100644 (file)
@@ -758,7 +758,7 @@ static void apple_dart_get_resv_regions(struct device *dev,
 
                region = iommu_alloc_resv_region(DOORBELL_ADDR,
                                                 PAGE_SIZE, prot,
-                                                IOMMU_RESV_MSI);
+                                                IOMMU_RESV_MSI, GFP_KERNEL);
                if (!region)
                        return;
 
index ba47c73..6d5df91 100644 (file)
@@ -2757,7 +2757,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_SW_MSI);
+                                        prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
        if (!region)
                return;
 
index 6c1114a..30dab14 100644 (file)
@@ -1534,7 +1534,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_SW_MSI);
+                                        prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
        if (!region)
                return;
 
index a8b36c3..48cdcd0 100644 (file)
@@ -2410,6 +2410,7 @@ static int __init si_domain_init(int hw)
 
        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
+               si_domain = NULL;
                return -EFAULT;
        }
 
@@ -3052,6 +3053,10 @@ free_iommu:
                disable_dmar_iommu(iommu);
                free_dmar_iommu(iommu);
        }
+       if (si_domain) {
+               domain_exit(si_domain);
+               si_domain = NULL;
+       }
 
        return ret;
 }
@@ -4534,7 +4539,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
        struct device *i_dev;
        int i;
 
-       down_read(&dmar_global_lock);
+       rcu_read_lock();
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, i_dev) {
@@ -4552,14 +4557,15 @@ static void intel_iommu_get_resv_regions(struct device *device,
                                IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
 
                        resv = iommu_alloc_resv_region(rmrr->base_address,
-                                                      length, prot, type);
+                                                      length, prot, type,
+                                                      GFP_ATOMIC);
                        if (!resv)
                                break;
 
                        list_add_tail(&resv->list, head);
                }
        }
-       up_read(&dmar_global_lock);
+       rcu_read_unlock();
 
 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
        if (dev_is_pci(device)) {
@@ -4567,7 +4573,8 @@ static void intel_iommu_get_resv_regions(struct device *device,
 
                if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
                        reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
-                                                  IOMMU_RESV_DIRECT_RELAXABLE);
+                                       IOMMU_RESV_DIRECT_RELAXABLE,
+                                       GFP_KERNEL);
                        if (reg)
                                list_add_tail(&reg->list, head);
                }
@@ -4576,7 +4583,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
 
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
-                                     0, IOMMU_RESV_MSI);
+                                     0, IOMMU_RESV_MSI, GFP_KERNEL);
        if (!reg)
                return;
        list_add_tail(&reg->list, head);
index 4893c24..65a3b3d 100644 (file)
@@ -504,7 +504,7 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
        LIST_HEAD(stack);
 
        nr = iommu_alloc_resv_region(new->start, new->length,
-                                    new->prot, new->type);
+                                    new->prot, new->type, GFP_KERNEL);
        if (!nr)
                return -ENOMEM;
 
@@ -2579,11 +2579,12 @@ EXPORT_SYMBOL(iommu_put_resv_regions);
 
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
                                                  size_t length, int prot,
-                                                 enum iommu_resv_type type)
+                                                 enum iommu_resv_type type,
+                                                 gfp_t gfp)
 {
        struct iommu_resv_region *region;
 
-       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       region = kzalloc(sizeof(*region), gfp);
        if (!region)
                return NULL;
 
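The IOMMU hunks in this region all follow from the signature change shown here: iommu_alloc_resv_region() now takes an explicit gfp_t, and the intel-iommu RMRR walk above switches from dmar_global_lock to rcu_read_lock() and therefore passes GFP_ATOMIC, while the remaining callers keep GFP_KERNEL. A minimal caller sketch against the updated signature (addresses hypothetical):

	/* Illustrative caller only; EXAMPLE_BASE/EXAMPLE_SIZE are hypothetical. */
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(EXAMPLE_BASE, EXAMPLE_SIZE,
					 IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);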
index 5a4e00e..2ab2ecf 100644 (file)
@@ -917,7 +917,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
                        continue;
 
                region = iommu_alloc_resv_region(resv->iova_base, resv->size,
-                                                prot, IOMMU_RESV_RESERVED);
+                                                prot, IOMMU_RESV_RESERVED,
+                                                GFP_KERNEL);
                if (!region)
                        return;
 
index b7c2280..8b1b5c2 100644 (file)
@@ -490,11 +490,13 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
                fallthrough;
        case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
                region = iommu_alloc_resv_region(start, size, 0,
-                                                IOMMU_RESV_RESERVED);
+                                                IOMMU_RESV_RESERVED,
+                                                GFP_KERNEL);
                break;
        case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                region = iommu_alloc_resv_region(start, size, prot,
-                                                IOMMU_RESV_MSI);
+                                                IOMMU_RESV_MSI,
+                                                GFP_KERNEL);
                break;
        }
        if (!region)
@@ -909,7 +911,8 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
         */
        if (!msi) {
                msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                             prot, IOMMU_RESV_SW_MSI);
+                                             prot, IOMMU_RESV_SW_MSI,
+                                             GFP_KERNEL);
                if (!msi)
                        return;
 
index b9eeb87..07f0d79 100644 (file)
@@ -20,12 +20,12 @@ static struct gpiod_lookup_table *simatic_ipc_led_gpio_table;
 static struct gpiod_lookup_table simatic_ipc_led_gpio_table_127e = {
        .dev_id = "leds-gpio",
        .table = {
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 0, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 1, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 2, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 3, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 4, GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 5, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
        },
index 09c7ed2..9c5ef81 100644 (file)
@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b)
 {
        BUG_ON(b->hold_count);
 
-       if (!b->state)  /* fast case */
+       /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
+       if (!smp_load_acquire(&b->state))       /* fast case */
                return;
 
        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
                BUG_ON(test_bit(B_DIRTY, &b->state));
 
                if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
-                   unlikely(test_bit(B_READING, &b->state)))
+                   unlikely(test_bit_acquire(B_READING, &b->state)))
                        continue;
 
                if (!b->hold_count) {
@@ -1058,7 +1059,7 @@ found_buffer:
         * If the user called both dm_bufio_prefetch and dm_bufio_get on
         * the same buffer, it would deadlock if we waited.
         */
-       if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+       if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
                return NULL;
 
        b->hold_count++;
@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b)
                 * invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
-                   !test_bit(B_READING, &b->state) &&
+                   !test_bit_acquire(B_READING, &b->state) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move);
 
 static void forget_buffer_locked(struct dm_buffer *b)
 {
-       if (likely(!b->hold_count) && likely(!b->state)) {
+       if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
                __unlink_buffer(b);
                __free_buffer_wake(b);
        }
@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 {
        if (!(gfp & __GFP_FS) ||
            (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
-               if (test_bit(B_READING, &b->state) ||
+               if (test_bit_acquire(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
                        return false;
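The dm-bufio changes above tighten the lockless fast paths: plain reads of b->state become smp_load_acquire(), and unlocked B_READING checks use test_bit_acquire(), pairing with the smp_mb__before_atomic()/clear_bit() on the read-completion side so buffer contents are only consumed after the flag is observed clear. A condensed sketch of the pairing, with a hypothetical buffer type:

	/* Illustrative only; buf and consume() are hypothetical. */

	/* completion side (release) */
	smp_mb__before_atomic();
	clear_bit(B_READING, &buf->state);
	wake_up_bit(&buf->state, B_READING);

	/* lockless reader side (acquire) */
	if (test_bit_acquire(B_READING, &buf->state))
		return NULL;		/* read still in flight */
	consume(buf->data);		/* ordered after the flag check */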
index c05fc34..06eb31a 100644 (file)
@@ -166,7 +166,7 @@ struct dm_cache_policy_type {
        struct dm_cache_policy_type *real;
 
        /*
-        * Policies may store a hint for each each cache block.
+        * Policies may store a hint for each cache block.
         * Currently the size of this hint must be 0 or 4 bytes but we
         * expect to relax this in future.
         */
index 811b0a5..2f1cc66 100644 (file)
@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
                reason = "max discard sectors smaller than a region";
 
        if (reason) {
-               DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+               DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
                       dest_dev, reason);
                clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
        }
index 98976aa..6b3f867 100644 (file)
@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
                hc = __get_name_cell(new);
 
        if (hc) {
-               DMWARN("Unable to change %s on mapped device %s to one that "
-                      "already exists: %s",
-                      change_uuid ? "uuid" : "name",
-                      param->name, new);
+               DMERR("Unable to change %s on mapped device %s to one that "
+                     "already exists: %s",
+                     change_uuid ? "uuid" : "name",
+                     param->name, new);
                dm_put(hc->md);
                up_write(&_hash_lock);
                kfree(new_data);
@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
         */
        hc = __get_name_cell(param->name);
        if (!hc) {
-               DMWARN("Unable to rename non-existent device, %s to %s%s",
-                      param->name, change_uuid ? "uuid " : "", new);
+               DMERR("Unable to rename non-existent device, %s to %s%s",
+                     param->name, change_uuid ? "uuid " : "", new);
                up_write(&_hash_lock);
                kfree(new_data);
                return ERR_PTR(-ENXIO);
@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
         * Does this device already have a uuid?
         */
        if (change_uuid && hc->uuid) {
-               DMWARN("Unable to change uuid of mapped device %s to %s "
-                      "because uuid is already set to %s",
-                      param->name, new, hc->uuid);
+               DMERR("Unable to change uuid of mapped device %s to %s "
+                     "because uuid is already set to %s",
+                     param->name, new, hc->uuid);
                dm_put(hc->md);
                up_write(&_hash_lock);
                kfree(new_data);
@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
 static int check_name(const char *name)
 {
        if (strchr(name, '/')) {
-               DMWARN("invalid device name");
+               DMERR("invalid device name");
                return -EINVAL;
        }
 
@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
        down_read(&_hash_lock);
        hc = dm_get_mdptr(md);
        if (!hc || hc->md != md) {
-               DMWARN("device has been removed from the dev hash table.");
+               DMERR("device has been removed from the dev hash table.");
                goto out;
        }
 
@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
        if (new_data < param->data ||
            invalid_str(new_data, (void *) param + param_size) || !*new_data ||
            strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
-               DMWARN("Invalid new mapped device name or uuid string supplied.");
+               DMERR("Invalid new mapped device name or uuid string supplied.");
                return -EINVAL;
        }
 
@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
 
        if (geostr < param->data ||
            invalid_str(geostr, (void *) param + param_size)) {
-               DMWARN("Invalid geometry supplied.");
+               DMERR("Invalid geometry supplied.");
                goto out;
        }
 
@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
                   indata + 1, indata + 2, indata + 3, &dummy);
 
        if (x != 4) {
-               DMWARN("Unable to interpret geometry settings.");
+               DMERR("Unable to interpret geometry settings.");
                goto out;
        }
 
        if (indata[0] > 65535 || indata[1] > 255 ||
            indata[2] > 255 || indata[3] > ULONG_MAX) {
-               DMWARN("Geometry exceeds range limits.");
+               DMERR("Geometry exceeds range limits.");
                goto out;
        }
 
@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table,
        char *target_params;
 
        if (!param->target_count) {
-               DMWARN("populate_table: no targets specified");
+               DMERR("populate_table: no targets specified");
                return -EINVAL;
        }
 
@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table,
 
                r = next_target(spec, next, end, &spec, &target_params);
                if (r) {
-                       DMWARN("unable to find target");
+                       DMERR("unable to find target");
                        return r;
                }
 
@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table,
                                        (sector_t) spec->length,
                                        target_params);
                if (r) {
-                       DMWARN("error adding target to table");
+                       DMERR("error adding target to table");
                        return r;
                }
 
@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
        if (immutable_target_type &&
            (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
            !dm_table_get_wildcard_target(t)) {
-               DMWARN("can't replace immutable target type %s",
-                      immutable_target_type->name);
+               DMERR("can't replace immutable target type %s",
+                     immutable_target_type->name);
                r = -EINVAL;
                goto err_unlock_md_type;
        }
@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
                /* setup md->queue to reflect md's type (may block) */
                r = dm_setup_md_queue(md, t);
                if (r) {
-                       DMWARN("unable to set up device queue for new table.");
+                       DMERR("unable to set up device queue for new table.");
                        goto err_unlock_md_type;
                }
        } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
-               DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
-                      dm_get_md_type(md), dm_table_get_type(t));
+               DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
+                     dm_get_md_type(md), dm_table_get_type(t));
                r = -EINVAL;
                goto err_unlock_md_type;
        }
@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
        down_write(&_hash_lock);
        hc = dm_get_mdptr(md);
        if (!hc || hc->md != md) {
-               DMWARN("device has been removed from the dev hash table.");
+               DMERR("device has been removed from the dev hash table.");
                up_write(&_hash_lock);
                r = -ENXIO;
                goto err_destroy_table;
@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
 
        if (tmsg < (struct dm_target_msg *) param->data ||
            invalid_str(tmsg->message, (void *) param + param_size)) {
-               DMWARN("Invalid target message parameters.");
+               DMERR("Invalid target message parameters.");
                r = -EINVAL;
                goto out;
        }
 
        r = dm_split_args(&argc, &argv, tmsg->message);
        if (r) {
-               DMWARN("Failed to split target message parameters");
+               DMERR("Failed to split target message parameters");
                goto out;
        }
 
        if (!argc) {
-               DMWARN("Empty message received.");
+               DMERR("Empty message received.");
                r = -EINVAL;
                goto out_argv;
        }
@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
 
        ti = dm_table_find_target(table, tmsg->sector);
        if (!ti) {
-               DMWARN("Target message sector outside device.");
+               DMERR("Target message sector outside device.");
                r = -EINVAL;
        } else if (ti->type->message)
                r = ti->type->message(ti, argc, argv, result, maxlen);
        else {
-               DMWARN("Target type does not support messages");
+               DMERR("Target type does not support messages");
                r = -EINVAL;
        }
 
@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
 
        if ((DM_VERSION_MAJOR != version[0]) ||
            (DM_VERSION_MINOR < version[1])) {
-               DMWARN("ioctl interface mismatch: "
-                      "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
-                      DM_VERSION_MAJOR, DM_VERSION_MINOR,
-                      DM_VERSION_PATCHLEVEL,
-                      version[0], version[1], version[2], cmd);
+               DMERR("ioctl interface mismatch: "
+                     "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+                     DM_VERSION_MAJOR, DM_VERSION_MINOR,
+                     DM_VERSION_PATCHLEVEL,
+                     version[0], version[1], version[2], cmd);
                r = -EINVAL;
        }
 
@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
 
        if (cmd == DM_DEV_CREATE_CMD) {
                if (!*param->name) {
-                       DMWARN("name not supplied when creating device");
+                       DMERR("name not supplied when creating device");
                        return -EINVAL;
                }
        } else if (*param->uuid && *param->name) {
-               DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
+               DMERR("only supply one of name or uuid, cmd(%u)", cmd);
                return -EINVAL;
        }
 
@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
 
        fn = lookup_ioctl(cmd, &ioctl_flags);
        if (!fn) {
-               DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+               DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
                return -ENOTTY;
        }
 
@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
                                        (sector_t) spec_array[i]->length,
                                        target_params_array[i]);
                if (r) {
-                       DMWARN("error adding target to table");
+                       DMERR("error adding target to table");
                        goto err_destroy_table;
                }
        }
@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
        /* setup md->queue to reflect md's type (may block) */
        r = dm_setup_md_queue(md, t);
        if (r) {
-               DMWARN("unable to set up device queue for new table.");
+               DMERR("unable to set up device queue for new table.");
                goto err_destroy_table;
        }
 
index c640be4..5426367 100644 (file)
@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                 * of the "sync" directive.
                 *
                 * With reshaping capability added, we must ensure that
-                * that the "sync" directive is disallowed during the reshape.
+                * the "sync" directive is disallowed during the reshape.
                 */
                if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
                        continue;
@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 
 /*
  * Adjust data_offset and new_data_offset on all disk members of @rs
- * for out of place reshaping if requested by contructor
+ * for out of place reshaping if requested by constructor
  *
  * We need free space at the beginning of each raid disk for forward
  * and at the end for backward reshapes which userspace has to provide
index 3001b10..a41209a 100644 (file)
@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
                dm_requeue_original_request(tio, true);
                break;
        default:
-               DMWARN("unimplemented target endio return value: %d", r);
+               DMCRIT("unimplemented target endio return value: %d", r);
                BUG();
        }
 }
@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
                dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
-               DMWARN("unimplemented target map return value: %d", r);
+               DMCRIT("unimplemented target map return value: %d", r);
                BUG();
        }
 
index 8326f9f..f105a71 100644 (file)
@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
                return 2; /* this wasn't a stats message */
 
        if (r == -EINVAL)
-               DMWARN("Invalid parameters for message %s", argv[0]);
+               DMCRIT("Invalid parameters for message %s", argv[0]);
 
        return r;
 }
index d8034ff..078da18 100644 (file)
@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                return 0;
 
        if ((start >= dev_size) || (start + len > dev_size)) {
-               DMWARN("%s: %pg too small for target: "
-                      "start=%llu, len=%llu, dev_size=%llu",
-                      dm_device_name(ti->table->md), bdev,
-                      (unsigned long long)start,
-                      (unsigned long long)len,
-                      (unsigned long long)dev_size);
+               DMERR("%s: %pg too small for target: "
+                     "start=%llu, len=%llu, dev_size=%llu",
+                     dm_device_name(ti->table->md), bdev,
+                     (unsigned long long)start,
+                     (unsigned long long)len,
+                     (unsigned long long)dev_size);
                return 1;
        }
 
@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
                if (start & (zone_sectors - 1)) {
-                       DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
-                              dm_device_name(ti->table->md),
-                              (unsigned long long)start,
-                              zone_sectors, bdev);
+                       DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
+                             dm_device_name(ti->table->md),
+                             (unsigned long long)start,
+                             zone_sectors, bdev);
                        return 1;
                }
 
@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                 * the sector range.
                 */
                if (len & (zone_sectors - 1)) {
-                       DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
-                              dm_device_name(ti->table->md),
-                              (unsigned long long)len,
-                              zone_sectors, bdev);
+                       DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
+                             dm_device_name(ti->table->md),
+                             (unsigned long long)len,
+                             zone_sectors, bdev);
                        return 1;
                }
        }
@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                return 0;
 
        if (start & (logical_block_size_sectors - 1)) {
-               DMWARN("%s: start=%llu not aligned to h/w "
-                      "logical block size %u of %pg",
-                      dm_device_name(ti->table->md),
-                      (unsigned long long)start,
-                      limits->logical_block_size, bdev);
+               DMERR("%s: start=%llu not aligned to h/w "
+                     "logical block size %u of %pg",
+                     dm_device_name(ti->table->md),
+                     (unsigned long long)start,
+                     limits->logical_block_size, bdev);
                return 1;
        }
 
        if (len & (logical_block_size_sectors - 1)) {
-               DMWARN("%s: len=%llu not aligned to h/w "
-                      "logical block size %u of %pg",
-                      dm_device_name(ti->table->md),
-                      (unsigned long long)len,
-                      limits->logical_block_size, bdev);
+               DMERR("%s: len=%llu not aligned to h/w "
+                     "logical block size %u of %pg",
+                     dm_device_name(ti->table->md),
+                     (unsigned long long)len,
+                     limits->logical_block_size, bdev);
                return 1;
        }
 
@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
                }
        }
        if (!found) {
-               DMWARN("%s: device %s not in table devices list",
-                      dm_device_name(ti->table->md), d->name);
+               DMERR("%s: device %s not in table devices list",
+                     dm_device_name(ti->table->md), d->name);
                return;
        }
        if (refcount_dec_and_test(&dd->count)) {
@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
        }
 
        if (remaining) {
-               DMWARN("%s: table line %u (start sect %llu len %llu) "
-                      "not aligned to h/w logical block size %u",
-                      dm_device_name(t->md), i,
-                      (unsigned long long) ti->begin,
-                      (unsigned long long) ti->len,
-                      limits->logical_block_size);
+               DMERR("%s: table line %u (start sect %llu len %llu) "
+                     "not aligned to h/w logical block size %u",
+                     dm_device_name(t->md), i,
+                     (unsigned long long) ti->begin,
+                     (unsigned long long) ti->len,
+                     limits->logical_block_size);
                return -EINVAL;
        }
 
@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
        struct dm_md_mempools *pools;
 
        if (unlikely(type == DM_TYPE_NONE)) {
-               DMWARN("no table type is set, can't allocate mempools");
+               DMERR("no table type is set, can't allocate mempools");
                return -EINVAL;
        }
 
@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk)
  * Get a disk whose integrity profile reflects the table's profile.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
+static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
 {
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t)
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
-               DMWARN("%s: conflict with existing integrity profile: "
-                      "%s profile mismatch",
-                      dm_device_name(t->md),
-                      template_disk->disk_name);
+               DMERR("%s: conflict with existing integrity profile: "
+                     "%s profile mismatch",
+                     dm_device_name(t->md),
+                     template_disk->disk_name);
                return 1;
        }
 
@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
        if (t->md->queue &&
            !blk_crypto_has_capabilities(profile,
                                         t->md->queue->crypto_profile)) {
-               DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
+               DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
                dm_destroy_crypto_profile(profile);
                return -EINVAL;
        }
index 8a00cc4..ccf5b85 100644 (file)
@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        /* WQ_UNBOUND greatly improves performance when running on ramdisk */
        wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
-       if (v->use_tasklet) {
-               /*
-                * Allow verify_wq to preempt softirq since verification in
-                * tasklet will fall-back to using it for error handling
-                * (or if the bufio cache doesn't have required hashes).
-                */
-               wq_flags |= WQ_HIGHPRI;
-       }
+       /*
+        * Using WQ_HIGHPRI improves throughput and completion latency by
+        * reducing wait times when reading from a dm-verity device.
+        *
+        * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
+        * allows verify_wq to preempt softirq since verification in tasklet
+        * will fall-back to using it for error handling (or if the bufio cache
+        * doesn't have required hashes).
+        */
+       wq_flags |= WQ_HIGHPRI;
        v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
        if (!v->verify_wq) {
                ti->error = "Cannot allocate workqueue";
index 60549b6..95a1ee3 100644 (file)
@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 
        if (geo->start > sz) {
-               DMWARN("Start sector is beyond the geometry limits.");
+               DMERR("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }
 
@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio)
                        /* The target will handle the io */
                        return;
                default:
-                       DMWARN("unimplemented target endio return value: %d", r);
+                       DMCRIT("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }
@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone)
                        dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
                break;
        default:
-               DMWARN("unimplemented target map return value: %d", r);
+               DMCRIT("unimplemented target map return value: %d", r);
                BUG();
        }
 }
@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
        if (!md) {
-               DMWARN("unable to allocate device, out of memory.");
+               DMERR("unable to allocate device, out of memory.");
                return NULL;
        }
 
@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor)
        md->disk->minors = 1;
        md->disk->flags |= GENHD_FL_NO_PART;
        md->disk->fops = &dm_blk_dops;
-       md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
 
index ba6592b..283b78b 100644 (file)
@@ -24,7 +24,7 @@ if MEDIA_SUPPORT
 
 config MEDIA_SUPPORT_FILTER
        bool "Filter media drivers"
-       default y if !EMBEDDED && !EXPERT
+       default y if !EXPERT
        help
           Configuring the media subsystem can be complex, as there are
           hundreds of drivers and other config options.
index 41a7929..4f5ab3c 100644 (file)
@@ -1027,6 +1027,7 @@ static const u8 cec_msg_size[256] = {
        [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
        [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
        [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
+       [CEC_MSG_SET_AUDIO_VOLUME_LEVEL] = 3 | DIRECTED,
        [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
        [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
        [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
index 3b583ed..6ebedc7 100644 (file)
@@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
        uint8_t *cec_message = cros_ec->event_data.data.cec_message;
        unsigned int len = cros_ec->event_size;
 
+       if (len > CEC_MAX_MSG_SIZE)
+               len = CEC_MAX_MSG_SIZE;
        cros_ec_cec->rx_msg.len = len;
        memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
 
@@ -221,6 +223,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
        { "Google", "Moli", "0000:00:02.0", "Port B" },
        /* Google Kinox */
        { "Google", "Kinox", "0000:00:02.0", "Port B" },
+       /* Google Kuldax */
+       { "Google", "Kuldax", "0000:00:02.0", "Port B" },
 };
 
 static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
index ce9a9d9..0a30e7a 100644 (file)
@@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
                                dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
                        cec->rx = STATE_BUSY;
                        cec->msg.len = status >> 24;
+                       if (cec->msg.len > CEC_MAX_MSG_SIZE)
+                               cec->msg.len = CEC_MAX_MSG_SIZE;
                        cec->msg.rx_status = CEC_RX_STATUS_OK;
                        s5p_cec_get_rx_buf(cec, cec->msg.len,
                                        cec->msg.msg);
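Both CEC receive paths above (cros-ec-cec and s5p-cec) bound a length reported by hardware or firmware before copying into the fixed-size struct cec_msg buffer, which holds at most CEC_MAX_MSG_SIZE (16) bytes. The clamp in isolation:

	/* Illustrative only; len and rx_buf come from untrusted hardware state. */
	if (len > CEC_MAX_MSG_SIZE)
		len = CEC_MAX_MSG_SIZE;
	msg->len = len;
	memcpy(msg->msg, rx_buf, len);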
index 47d83e0..9807f54 100644 (file)
@@ -6660,7 +6660,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
 static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
        struct drxk_state *state = fe->demodulator_priv;
-       u16 err;
+       u16 err = 0;
 
        dprintk(1, "\n");
 
index c6ab531..e408049 100644 (file)
@@ -406,7 +406,6 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd,
                          struct v4l2_subdev_format *format)
 {
        struct ar0521_dev *sensor = to_ar0521_dev(sd);
-       int ret = 0;
 
        ar0521_adj_fmt(&format->format);
 
@@ -423,7 +422,7 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd,
        }
 
        mutex_unlock(&sensor->lock);
-       return ret;
+       return 0;
 }
 
 static int ar0521_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -756,10 +755,12 @@ static int ar0521_power_on(struct device *dev)
                gpiod_set_value(sensor->reset_gpio, 0);
        usleep_range(4500, 5000); /* min 45000 clocks */
 
-       for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++)
-               if (ar0521_write_regs(sensor, initial_regs[cnt].data,
-                                     initial_regs[cnt].count))
+       for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
+               ret = ar0521_write_regs(sensor, initial_regs[cnt].data,
+                                       initial_regs[cnt].count);
+               if (ret)
                        goto off;
+       }
 
        ret = ar0521_write_reg(sensor, AR0521_REG_SERIAL_FORMAT,
                               AR0521_REG_SERIAL_FORMAT_MIPI |
index ee6bbbb..25bf113 100644 (file)
@@ -238,6 +238,43 @@ static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol,
        return 1;
 }
 
+static int get_key_geniatech(struct IR_i2c *ir, enum rc_proto *protocol,
+                            u32 *scancode, u8 *toggle)
+{
+       int i, rc;
+       unsigned char b;
+
+       /* poll IR chip */
+       for (i = 0; i < 4; i++) {
+               rc = i2c_master_recv(ir->c, &b, 1);
+               if (rc == 1)
+                       break;
+               msleep(20);
+       }
+       if (rc != 1) {
+               dev_dbg(&ir->rc->dev, "read error\n");
+               if (rc < 0)
+                       return rc;
+               return -EIO;
+       }
+
+       /* don't repeat the key */
+       if (ir->old == b)
+               return 0;
+       ir->old = b;
+
+       /* decode to RC5 */
+       b &= 0x7f;
+       b = (b - 1) / 2;
+
+       dev_dbg(&ir->rc->dev, "key %02x\n", b);
+
+       *protocol = RC_PROTO_RC5;
+       *scancode = b;
+       *toggle = ir->old >> 7;
+       return 1;
+}
+
 static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
                                     u32 *scancode, u8 *toggle)
 {
@@ -766,6 +803,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
                rc_proto    = RC_PROTO_BIT_OTHER;
                ir_codes    = RC_MAP_EMPTY;
                break;
+       case 0x33:
+               name        = "Geniatech";
+               ir->get_key = get_key_geniatech;
+               rc_proto    = RC_PROTO_BIT_RC5;
+               ir_codes    = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
+               ir->old     = 0xfc;
+               break;
        case 0x6b:
                name        = "FusionHDTV";
                ir->get_key = get_key_fusionhdtv;
@@ -825,6 +869,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
                case IR_KBD_GET_KEY_KNC1:
                        ir->get_key = get_key_knc1;
                        break;
+               case IR_KBD_GET_KEY_GENIATECH:
+                       ir->get_key = get_key_geniatech;
+                       break;
                case IR_KBD_GET_KEY_FUSIONHDTV:
                        ir->get_key = get_key_fusionhdtv;
                        break;
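The new get_key_geniatech() above polls a single byte, drops repeats, and maps it to RC5 with b &= 0x7f; b = (b - 1) / 2, taking the toggle from bit 7 of the raw byte. A worked example with a hypothetical raw value:

	/*
	 * Illustrative only: raw byte 0x8b (hypothetical)
	 *   toggle   = 0x8b >> 7      = 1
	 *   b        = 0x8b & 0x7f    = 0x0b
	 *   scancode = (0x0b - 1) / 2 = 5   (RC_PROTO_RC5)
	 */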
index 246d8d1..20f548a 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
index fe18e52..46d91cd 100644 (file)
@@ -633,7 +633,7 @@ static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
 
        /*
         * Set pixel integration time to the whole frame time.
-        * This value controls the the shutter delay when running with AE
+        * This value controls the shutter delay when running with AE
         * disabled. If longer than frame time, it affects the output
         * frame rate.
         */
index 1852e1c..2d74039 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -447,8 +448,6 @@ struct ov5640_dev {
        /* lock to protect all members below */
        struct mutex lock;
 
-       int power_count;
-
        struct v4l2_mbus_framefmt fmt;
        bool pending_fmt_change;
 
@@ -2696,39 +2695,24 @@ power_off:
        return ret;
 }
 
-/* --------------- Subdev Operations --------------- */
-
-static int ov5640_s_power(struct v4l2_subdev *sd, int on)
+static int ov5640_sensor_suspend(struct device *dev)
 {
-       struct ov5640_dev *sensor = to_ov5640_dev(sd);
-       int ret = 0;
-
-       mutex_lock(&sensor->lock);
-
-       /*
-        * If the power count is modified from 0 to != 0 or from != 0 to 0,
-        * update the power state.
-        */
-       if (sensor->power_count == !on) {
-               ret = ov5640_set_power(sensor, !!on);
-               if (ret)
-                       goto out;
-       }
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct ov5640_dev *ov5640 = to_ov5640_dev(sd);
 
-       /* Update the power count. */
-       sensor->power_count += on ? 1 : -1;
-       WARN_ON(sensor->power_count < 0);
-out:
-       mutex_unlock(&sensor->lock);
+       return ov5640_set_power(ov5640, false);
+}
 
-       if (on && !ret && sensor->power_count == 1) {
-               /* restore controls */
-               ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
-       }
+static int ov5640_sensor_resume(struct device *dev)
+{
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct ov5640_dev *ov5640 = to_ov5640_dev(sd);
 
-       return ret;
+       return ov5640_set_power(ov5640, true);
 }
 
+/* --------------- Subdev Operations --------------- */
+
 static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
                                     struct v4l2_fract *fi,
                                     u32 width, u32 height)
@@ -3314,6 +3298,9 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
 
        /* v4l2_ctrl_lock() locks our own mutex */
 
+       if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
+               return 0;
+
        switch (ctrl->id) {
        case V4L2_CID_AUTOGAIN:
                val = ov5640_get_gain(sensor);
@@ -3329,6 +3316,8 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
                break;
        }
 
+       pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
+
        return 0;
 }
 
@@ -3358,9 +3347,9 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
        /*
         * If the device is not powered up by the host driver do
         * not apply any controls to H/W at this time. Instead
-        * the controls will be restored right after power-up.
+        * the controls will be restored at start streaming time.
         */
-       if (sensor->power_count == 0)
+       if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
                return 0;
 
        switch (ctrl->id) {
@@ -3402,6 +3391,8 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
        }
 
+       pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
+
        return ret;
 }
 
@@ -3677,6 +3668,18 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
        struct ov5640_dev *sensor = to_ov5640_dev(sd);
        int ret = 0;
 
+       if (enable) {
+               ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev);
+               if (ret < 0)
+                       return ret;
+
+               ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+               if (ret) {
+                       pm_runtime_put(&sensor->i2c_client->dev);
+                       return ret;
+               }
+       }
+
        mutex_lock(&sensor->lock);
 
        if (sensor->streaming == !enable) {
@@ -3701,8 +3704,13 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
                if (!ret)
                        sensor->streaming = enable;
        }
+
 out:
        mutex_unlock(&sensor->lock);
+
+       if (!enable || ret)
+               pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
+
        return ret;
 }
 
@@ -3724,7 +3732,6 @@ static int ov5640_init_cfg(struct v4l2_subdev *sd,
 }
 
 static const struct v4l2_subdev_core_ops ov5640_core_ops = {
-       .s_power = ov5640_s_power,
        .log_status = v4l2_ctrl_subdev_log_status,
        .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
        .unsubscribe_event = v4l2_event_subdev_unsubscribe,
@@ -3770,26 +3777,20 @@ static int ov5640_check_chip_id(struct ov5640_dev *sensor)
        int ret = 0;
        u16 chip_id;
 
-       ret = ov5640_set_power_on(sensor);
-       if (ret)
-               return ret;
-
        ret = ov5640_read_reg16(sensor, OV5640_REG_CHIP_ID, &chip_id);
        if (ret) {
                dev_err(&client->dev, "%s: failed to read chip identifier\n",
                        __func__);
-               goto power_off;
+               return ret;
        }
 
        if (chip_id != 0x5640) {
                dev_err(&client->dev, "%s: wrong chip identifier, expected 0x5640, got 0x%x\n",
                        __func__, chip_id);
-               ret = -ENXIO;
+               return -ENXIO;
        }
 
-power_off:
-       ov5640_set_power_off(sensor);
-       return ret;
+       return 0;
 }
 
 static int ov5640_probe(struct i2c_client *client)
@@ -3880,26 +3881,43 @@ static int ov5640_probe(struct i2c_client *client)
 
        ret = ov5640_get_regulators(sensor);
        if (ret)
-               return ret;
+               goto entity_cleanup;
 
        mutex_init(&sensor->lock);
 
-       ret = ov5640_check_chip_id(sensor);
+       ret = ov5640_init_controls(sensor);
        if (ret)
                goto entity_cleanup;
 
-       ret = ov5640_init_controls(sensor);
-       if (ret)
+       ret = ov5640_sensor_resume(dev);
+       if (ret) {
+               dev_err(dev, "failed to power on\n");
                goto entity_cleanup;
+       }
+
+       pm_runtime_set_active(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_enable(dev);
+
+       ret = ov5640_check_chip_id(sensor);
+       if (ret)
+               goto err_pm_runtime;
 
        ret = v4l2_async_register_subdev_sensor(&sensor->sd);
        if (ret)
-               goto free_ctrls;
+               goto err_pm_runtime;
+
+       pm_runtime_set_autosuspend_delay(dev, 1000);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_put_autosuspend(dev);
 
        return 0;
 
-free_ctrls:
+err_pm_runtime:
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
        v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+       ov5640_sensor_suspend(dev);
 entity_cleanup:
        media_entity_cleanup(&sensor->sd.entity);
        mutex_destroy(&sensor->lock);
@@ -3910,6 +3928,12 @@ static void ov5640_remove(struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
        struct ov5640_dev *sensor = to_ov5640_dev(sd);
+       struct device *dev = &client->dev;
+
+       pm_runtime_disable(dev);
+       if (!pm_runtime_status_suspended(dev))
+               ov5640_sensor_suspend(dev);
+       pm_runtime_set_suspended(dev);
 
        v4l2_async_unregister_subdev(&sensor->sd);
        media_entity_cleanup(&sensor->sd.entity);
@@ -3917,6 +3941,10 @@ static void ov5640_remove(struct i2c_client *client)
        mutex_destroy(&sensor->lock);
 }
 
+static const struct dev_pm_ops ov5640_pm_ops = {
+       SET_RUNTIME_PM_OPS(ov5640_sensor_suspend, ov5640_sensor_resume, NULL)
+};
+
 static const struct i2c_device_id ov5640_id[] = {
        {"ov5640", 0},
        {},
@@ -3933,6 +3961,7 @@ static struct i2c_driver ov5640_i2c_driver = {
        .driver = {
                .name  = "ov5640",
                .of_match_table = ov5640_dt_ids,
+               .pm = &ov5640_pm_ops,
        },
        .id_table = ov5640_id,
        .probe_new = ov5640_probe,
index a233c34..cae1866 100644 (file)
@@ -3034,11 +3034,13 @@ static int ov8865_probe(struct i2c_client *client)
                                       &rate);
        if (!ret && sensor->extclk) {
                ret = clk_set_rate(sensor->extclk, rate);
-               if (ret)
-                       return dev_err_probe(dev, ret,
-                                            "failed to set clock rate\n");
+               if (ret) {
+                       dev_err_probe(dev, ret, "failed to set clock rate\n");
+                       goto error_endpoint;
+               }
        } else if (ret && !sensor->extclk) {
-               return dev_err_probe(dev, ret, "invalid clock config\n");
+               dev_err_probe(dev, ret, "invalid clock config\n");
+               goto error_endpoint;
        }
 
        sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk);
index b8176a3..25020d5 100644 (file)
@@ -581,7 +581,7 @@ static void __media_device_unregister_entity(struct media_entity *entity)
        struct media_device *mdev = entity->graph_obj.mdev;
        struct media_link *link, *tmp;
        struct media_interface *intf;
-       unsigned int i;
+       struct media_pad *iter;
 
        ida_free(&mdev->entity_internal_idx, entity->internal_idx);
 
@@ -597,8 +597,8 @@ static void __media_device_unregister_entity(struct media_entity *entity)
        __media_entity_remove_links(entity);
 
        /* Remove all pads that belong to this entity */
-       for (i = 0; i < entity->num_pads; i++)
-               media_gobj_destroy(&entity->pads[i].graph_obj);
+       media_entity_for_each_pad(entity, iter)
+               media_gobj_destroy(&iter->graph_obj);
 
        /* Remove the entity */
        media_gobj_destroy(&entity->graph_obj);
@@ -610,7 +610,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
                                              struct media_entity *entity)
 {
        struct media_entity_notify *notify, *next;
-       unsigned int i;
+       struct media_pad *iter;
        int ret;
 
        if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN ||
@@ -639,9 +639,8 @@ int __must_check media_device_register_entity(struct media_device *mdev,
        media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj);
 
        /* Initialize objects at the pads */
-       for (i = 0; i < entity->num_pads; i++)
-               media_gobj_create(mdev, MEDIA_GRAPH_PAD,
-                              &entity->pads[i].graph_obj);
+       media_entity_for_each_pad(entity, iter)
+               media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj);
 
        /* invoke entity_notify callbacks */
        list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
index afd1bd7..b8bcbc7 100644 (file)
@@ -59,10 +59,12 @@ static inline const char *link_type_name(struct media_link *link)
        }
 }
 
-__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
-                                         int idx_max)
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+                                       struct media_device *mdev)
 {
-       idx_max = ALIGN(idx_max, BITS_PER_LONG);
+       int idx_max;
+
+       idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG);
        ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL);
        if (!ent_enum->bmap)
                return -ENOMEM;
@@ -71,7 +73,7 @@ __must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(__media_entity_enum_init);
+EXPORT_SYMBOL_GPL(media_entity_enum_init);
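
As an illustration only, a minimal caller sketch for the reworked helper above, which now takes the media_device directly and derives the bitmap size internally; mdev stands for whatever struct media_device pointer the caller already has:

	struct media_entity_enum ent_enum;
	int ret;

	ret = media_entity_enum_init(&ent_enum, mdev);
	if (ret)
		return ret;

	/*
	 * ... mark entities with media_entity_enum_set(), test them with
	 * media_entity_enum_test() ...
	 */

	media_entity_enum_cleanup(&ent_enum);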
 
 void media_entity_enum_cleanup(struct media_entity_enum *ent_enum)
 {
@@ -193,7 +195,8 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
                           struct media_pad *pads)
 {
        struct media_device *mdev = entity->graph_obj.mdev;
-       unsigned int i;
+       struct media_pad *iter;
+       unsigned int i = 0;
 
        if (num_pads >= MEDIA_ENTITY_MAX_PADS)
                return -E2BIG;
@@ -204,12 +207,12 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
        if (mdev)
                mutex_lock(&mdev->graph_mutex);
 
-       for (i = 0; i < num_pads; i++) {
-               pads[i].entity = entity;
-               pads[i].index = i;
+       media_entity_for_each_pad(entity, iter) {
+               iter->entity = entity;
+               iter->index = i++;
                if (mdev)
                        media_gobj_create(mdev, MEDIA_GRAPH_PAD,
-                                       &entity->pads[i].graph_obj);
+                                         &iter->graph_obj);
        }
 
        if (mdev)
@@ -223,6 +226,33 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init);
  * Graph traversal
  */
 
+/*
+ * This function checks the interdependency inside the entity between @pad0
+ * and @pad1. If the two pads are interdependent they are part of the same
+ * pipeline, and enabling one of them means that the other becomes "locked"
+ * and no longer accepts configuration changes.
+ *
+ * This function uses the &media_entity_operations.has_pad_interdep() operation
+ * to check the dependency inside the entity between @pad0 and @pad1. If the
+ * has_pad_interdep operation is not implemented, all pads of the entity are
+ * considered to be interdependent.
+ */
+static bool media_entity_has_pad_interdep(struct media_entity *entity,
+                                         unsigned int pad0, unsigned int pad1)
+{
+       if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+               return false;
+
+       if (entity->pads[pad0].flags & entity->pads[pad1].flags &
+           (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE))
+               return false;
+
+       if (!entity->ops || !entity->ops->has_pad_interdep)
+               return true;
+
+       return entity->ops->has_pad_interdep(entity, pad0, pad1);
+}
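+
As an illustration only, a sketch of how an entity driver could provide the has_pad_interdep operation referenced in the comment above; the entity layout assumed here (pads 0 and 2 forming one internal route, pad 1 isolated) is purely hypothetical:

	static bool my_entity_has_pad_interdep(struct media_entity *entity,
					       unsigned int pad0, unsigned int pad1)
	{
		/* Only pads 0 (sink) and 2 (source) share an internal route. */
		return (pad0 == 0 && pad1 == 2) || (pad0 == 2 && pad1 == 0);
	}

	static const struct media_entity_operations my_entity_ops = {
		.has_pad_interdep = my_entity_has_pad_interdep,
	};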
+
 static struct media_entity *
 media_entity_other(struct media_entity *entity, struct media_link *link)
 {
@@ -367,139 +397,435 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph)
 }
 EXPORT_SYMBOL_GPL(media_graph_walk_next);
 
-int media_entity_get_fwnode_pad(struct media_entity *entity,
-                               struct fwnode_handle *fwnode,
-                               unsigned long direction_flags)
+/* -----------------------------------------------------------------------------
+ * Pipeline management
+ */
+
+/*
+ * The pipeline traversal stack stores pads that are reached during graph
+ * traversal, with a list of links to be visited to continue the traversal.
+ * When a new pad is reached, an entry is pushed on the top of the stack and
+ * points to the incoming pad and the first link of the entity.
+ *
+ * To find further pads in the pipeline, the traversal algorithm follows
+ * internal pad dependencies in the entity, and then links in the graph. It
+ * does so by iterating over all links of the entity, and following enabled
+ * links that originate from a pad that is internally connected to the incoming
+ * pad, as reported by the media_entity_has_pad_interdep() function.
+ */
+
+/**
+ * struct media_pipeline_walk_entry - Entry in the pipeline traversal stack
+ *
+ * @pad: The media pad being visited
+ * @links: Links left to be visited
+ */
+struct media_pipeline_walk_entry {
+       struct media_pad *pad;
+       struct list_head *links;
+};
+
+/**
+ * struct media_pipeline_walk - State used by the media pipeline traversal
+ *                             algorithm
+ *
+ * @mdev: The media device
+ * @stack: Depth-first search stack
+ * @stack.size: Number of allocated entries in @stack.entries
+ * @stack.top: Index of the top stack entry (-1 if the stack is empty)
+ * @stack.entries: Stack entries
+ */
+struct media_pipeline_walk {
+       struct media_device *mdev;
+
+       struct {
+               unsigned int size;
+               int top;
+               struct media_pipeline_walk_entry *entries;
+       } stack;
+};
+
+#define MEDIA_PIPELINE_STACK_GROW_STEP         16
+
+static struct media_pipeline_walk_entry *
+media_pipeline_walk_top(struct media_pipeline_walk *walk)
 {
-       struct fwnode_endpoint endpoint;
-       unsigned int i;
+       return &walk->stack.entries[walk->stack.top];
+}
+
+static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk)
+{
+       return walk->stack.top == -1;
+}
+
+/* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */
+static int media_pipeline_walk_resize(struct media_pipeline_walk *walk)
+{
+       struct media_pipeline_walk_entry *entries;
+       unsigned int new_size;
+
+       /* Safety check, to avoid stack overflows in case of bugs. */
+       if (walk->stack.size >= 256)
+               return -E2BIG;
+
+       new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP;
+
+       entries = krealloc(walk->stack.entries,
+                          new_size * sizeof(*walk->stack.entries),
+                          GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       walk->stack.entries = entries;
+       walk->stack.size = new_size;
+
+       return 0;
+}
+
+/* Push a new entry on the stack. */
+static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
+                                   struct media_pad *pad)
+{
+       struct media_pipeline_walk_entry *entry;
        int ret;
 
-       if (!entity->ops || !entity->ops->get_fwnode_pad) {
-               for (i = 0; i < entity->num_pads; i++) {
-                       if (entity->pads[i].flags & direction_flags)
-                               return i;
+       if (walk->stack.top + 1 >= walk->stack.size) {
+               ret = media_pipeline_walk_resize(walk);
+               if (ret)
+                       return ret;
+       }
+
+       walk->stack.top++;
+       entry = media_pipeline_walk_top(walk);
+       entry->pad = pad;
+       entry->links = pad->entity->links.next;
+
+       dev_dbg(walk->mdev->dev,
+               "media pipeline: pushed entry %u: '%s':%u\n",
+               walk->stack.top, pad->entity->name, pad->index);
+
+       return 0;
+}
+
+/*
+ * Move the top entry link cursor to the next link. If all links of the entry
+ * have been visited, pop the entry itself.
+ */
+static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+{
+       struct media_pipeline_walk_entry *entry;
+
+       if (WARN_ON(walk->stack.top < 0))
+               return;
+
+       entry = media_pipeline_walk_top(walk);
+
+       if (entry->links->next == &entry->pad->entity->links) {
+               dev_dbg(walk->mdev->dev,
+                       "media pipeline: entry %u has no more links, popping\n",
+                       walk->stack.top);
+
+               walk->stack.top--;
+               return;
+       }
+
+       entry->links = entry->links->next;
+
+       dev_dbg(walk->mdev->dev,
+               "media pipeline: moved entry %u to next link\n",
+               walk->stack.top);
+}
+
+/* Free all memory allocated while walking the pipeline. */
+static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk)
+{
+       kfree(walk->stack.entries);
+}
+
+/* Add a pad to the pipeline and push it to the stack. */
+static int media_pipeline_add_pad(struct media_pipeline *pipe,
+                                 struct media_pipeline_walk *walk,
+                                 struct media_pad *pad)
+{
+       struct media_pipeline_pad *ppad;
+
+       list_for_each_entry(ppad, &pipe->pads, list) {
+               if (ppad->pad == pad) {
+                       dev_dbg(pad->graph_obj.mdev->dev,
+                               "media pipeline: already contains pad '%s':%u\n",
+                               pad->entity->name, pad->index);
+                       return 0;
                }
+       }
 
-               return -ENXIO;
+       ppad = kzalloc(sizeof(*ppad), GFP_KERNEL);
+       if (!ppad)
+               return -ENOMEM;
+
+       ppad->pipe = pipe;
+       ppad->pad = pad;
+
+       list_add_tail(&ppad->list, &pipe->pads);
+
+       dev_dbg(pad->graph_obj.mdev->dev,
+               "media pipeline: added pad '%s':%u\n",
+               pad->entity->name, pad->index);
+
+       return media_pipeline_walk_push(walk, pad);
+}
+
+/* Explore the next link of the entity at the top of the stack. */
+static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+                                           struct media_pipeline_walk *walk)
+{
+       struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
+       struct media_pad *pad;
+       struct media_link *link;
+       struct media_pad *local;
+       struct media_pad *remote;
+       int ret;
+
+       pad = entry->pad;
+       link = list_entry(entry->links, typeof(*link), list);
+       media_pipeline_walk_pop(walk);
+
+       dev_dbg(walk->mdev->dev,
+               "media pipeline: exploring link '%s':%u -> '%s':%u\n",
+               link->source->entity->name, link->source->index,
+               link->sink->entity->name, link->sink->index);
+
+       /* Skip links that are not enabled. */
+       if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+               dev_dbg(walk->mdev->dev,
+                       "media pipeline: skipping link (disabled)\n");
+               return 0;
        }
 
-       ret = fwnode_graph_parse_endpoint(fwnode, &endpoint);
+       /* Get the local pad and remote pad. */
+       if (link->source->entity == pad->entity) {
+               local = link->source;
+               remote = link->sink;
+       } else {
+               local = link->sink;
+               remote = link->source;
+       }
+
+       /*
+        * Skip links whose local pad is different from the incoming pad and is
+        * not connected to it internally within the entity.
+        */
+       if (pad != local &&
+           !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
+               dev_dbg(walk->mdev->dev,
+                       "media pipeline: skipping link (no route)\n");
+               return 0;
+       }
+
+       /*
+        * Add the local and remote pads of the link to the pipeline and push
+        * them to the stack, if they're not already present.
+        */
+       ret = media_pipeline_add_pad(pipe, walk, local);
        if (ret)
                return ret;
 
-       ret = entity->ops->get_fwnode_pad(entity, &endpoint);
-       if (ret < 0)
+       ret = media_pipeline_add_pad(pipe, walk, remote);
+       if (ret)
                return ret;
 
-       if (ret >= entity->num_pads)
-               return -ENXIO;
+       return 0;
+}
 
-       if (!(entity->pads[ret].flags & direction_flags))
-               return -ENXIO;
+static void media_pipeline_cleanup(struct media_pipeline *pipe)
+{
+       while (!list_empty(&pipe->pads)) {
+               struct media_pipeline_pad *ppad;
 
-       return ret;
+               ppad = list_first_entry(&pipe->pads, typeof(*ppad), list);
+               list_del(&ppad->list);
+               kfree(ppad);
+       }
 }
-EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
 
-/* -----------------------------------------------------------------------------
- * Pipeline management
- */
+static int media_pipeline_populate(struct media_pipeline *pipe,
+                                  struct media_pad *pad)
+{
+       struct media_pipeline_walk walk = { };
+       struct media_pipeline_pad *ppad;
+       int ret;
+
+       /*
+        * Populate the media pipeline by walking the media graph, starting
+        * from @pad.
+        */
+       INIT_LIST_HEAD(&pipe->pads);
+       pipe->mdev = pad->graph_obj.mdev;
+
+       walk.mdev = pipe->mdev;
+       walk.stack.top = -1;
+       ret = media_pipeline_add_pad(pipe, &walk, pad);
+       if (ret)
+               goto done;
+
+       /*
+        * Use a depth-first search algorithm: as long as the stack is not
+        * empty, explore the next link of the top entry. The
+        * media_pipeline_explore_next_link() function will either move to the
+        * next link, pop the entry if fully visited, or add new entries on
+        * top.
+        */
+       while (!media_pipeline_walk_empty(&walk)) {
+               ret = media_pipeline_explore_next_link(pipe, &walk);
+               if (ret)
+                       goto done;
+       }
+
+       dev_dbg(pad->graph_obj.mdev->dev,
+               "media pipeline populated, found pads:\n");
+
+       list_for_each_entry(ppad, &pipe->pads, list)
+               dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n",
+                       ppad->pad->entity->name, ppad->pad->index);
+
+       WARN_ON(walk.stack.top != -1);
 
-__must_check int __media_pipeline_start(struct media_entity *entity,
+       ret = 0;
+
+done:
+       media_pipeline_walk_destroy(&walk);
+
+       if (ret)
+               media_pipeline_cleanup(pipe);
+
+       return ret;
+}
+
+__must_check int __media_pipeline_start(struct media_pad *pad,
                                        struct media_pipeline *pipe)
 {
-       struct media_device *mdev = entity->graph_obj.mdev;
-       struct media_graph *graph = &pipe->graph;
-       struct media_entity *entity_err = entity;
-       struct media_link *link;
+       struct media_device *mdev = pad->entity->graph_obj.mdev;
+       struct media_pipeline_pad *err_ppad;
+       struct media_pipeline_pad *ppad;
        int ret;
 
-       if (pipe->streaming_count) {
-               pipe->streaming_count++;
+       lockdep_assert_held(&mdev->graph_mutex);
+
+       /*
+        * If the pad is already part of a pipeline, that pipeline must
+        * be the same as the pipe given to media_pipeline_start().
+        */
+       if (WARN_ON(pad->pipe && pad->pipe != pipe))
+               return -EINVAL;
+
+       /*
+        * If the pipeline has already been started, it is guaranteed to be
+        * valid, so just increase the start count.
+        */
+       if (pipe->start_count) {
+               pipe->start_count++;
                return 0;
        }
 
-       ret = media_graph_walk_init(&pipe->graph, mdev);
+       /*
+        * Populate the pipeline. This populates the media_pipeline pads list
+        * with media_pipeline_pad instances for each pad found during the
+        * graph walk.
+        */
+       ret = media_pipeline_populate(pipe, pad);
        if (ret)
                return ret;
 
-       media_graph_walk_start(&pipe->graph, entity);
+       /*
+        * Now that all the pads in the pipeline have been gathered, perform
+        * the validation steps.
+        */
+
+       list_for_each_entry(ppad, &pipe->pads, list) {
+               struct media_pad *pad = ppad->pad;
+               struct media_entity *entity = pad->entity;
+               bool has_enabled_link = false;
+               bool has_link = false;
+               struct media_link *link;
 
-       while ((entity = media_graph_walk_next(graph))) {
-               DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
-               DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);
+               dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
+                       pad->index);
 
-               if (entity->pipe && entity->pipe != pipe) {
-                       pr_err("Pipe active for %s. Can't start for %s\n",
-                               entity->name,
-                               entity_err->name);
+               /*
+                * 1. Ensure that the pad doesn't already belong to a different
+                * pipeline.
+                */
+               if (pad->pipe) {
+                       dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n",
+                               pad->entity->name, pad->index);
                        ret = -EBUSY;
                        goto error;
                }
 
-               /* Already streaming --- no need to check. */
-               if (entity->pipe)
-                       continue;
-
-               entity->pipe = pipe;
-
-               if (!entity->ops || !entity->ops->link_validate)
-                       continue;
-
-               bitmap_zero(active, entity->num_pads);
-               bitmap_fill(has_no_links, entity->num_pads);
-
+               /*
+                * 2. Validate all active links whose sink is the current pad.
+                * Validation of the source pads is performed in the context of
+                * the connected sink pad to avoid duplicating checks.
+                */
                for_each_media_entity_data_link(entity, link) {
-                       struct media_pad *pad = link->sink->entity == entity
-                                               ? link->sink : link->source;
+                       /* Skip links unrelated to the current pad. */
+                       if (link->sink != pad && link->source != pad)
+                               continue;
 
-                       /* Mark that a pad is connected by a link. */
-                       bitmap_clear(has_no_links, pad->index, 1);
+                       /* Record if the pad has links and enabled links. */
+                       if (link->flags & MEDIA_LNK_FL_ENABLED)
+                               has_enabled_link = true;
+                       has_link = true;
 
                        /*
-                        * Pads that either do not need to connect or
-                        * are connected through an enabled link are
-                        * fine.
+                        * Validate the link if it's enabled and has the
+                        * current pad as its sink.
                         */
-                       if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
-                           link->flags & MEDIA_LNK_FL_ENABLED)
-                               bitmap_set(active, pad->index, 1);
+                       if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+                               continue;
 
-                       /*
-                        * Link validation will only take place for
-                        * sink ends of the link that are enabled.
-                        */
-                       if (link->sink != pad ||
-                           !(link->flags & MEDIA_LNK_FL_ENABLED))
+                       if (link->sink != pad)
+                               continue;
+
+                       if (!entity->ops || !entity->ops->link_validate)
                                continue;
 
                        ret = entity->ops->link_validate(link);
-                       if (ret < 0 && ret != -ENOIOCTLCMD) {
-                               dev_dbg(entity->graph_obj.mdev->dev,
-                                       "link validation failed for '%s':%u -> '%s':%u, error %d\n",
+                       if (ret) {
+                               dev_dbg(mdev->dev,
+                                       "Link '%s':%u -> '%s':%u failed validation: %d\n",
                                        link->source->entity->name,
                                        link->source->index,
-                                       entity->name, link->sink->index, ret);
+                                       link->sink->entity->name,
+                                       link->sink->index, ret);
                                goto error;
                        }
-               }
 
-               /* Either no links or validated links are fine. */
-               bitmap_or(active, active, has_no_links, entity->num_pads);
+                       dev_dbg(mdev->dev,
+                               "Link '%s':%u -> '%s':%u is valid\n",
+                               link->source->entity->name,
+                               link->source->index,
+                               link->sink->entity->name,
+                               link->sink->index);
+               }
 
-               if (!bitmap_full(active, entity->num_pads)) {
+               /*
+                * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
+                * ensure that it has either no link or an enabled link.
+                */
+               if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
+                   !has_enabled_link) {
+                       dev_dbg(mdev->dev,
+                               "Pad '%s':%u must be connected by an enabled link\n",
+                               pad->entity->name, pad->index);
                        ret = -ENOLINK;
-                       dev_dbg(entity->graph_obj.mdev->dev,
-                               "'%s':%u must be connected by an enabled link\n",
-                               entity->name,
-                               (unsigned)find_first_zero_bit(
-                                       active, entity->num_pads));
                        goto error;
                }
+
+               /* Validation passed, store the pipe pointer in the pad. */
+               pad->pipe = pipe;
        }
 
-       pipe->streaming_count++;
+       pipe->start_count++;
 
        return 0;
 
@@ -508,42 +834,37 @@ error:
         * Link validation on graph failed. We revert what we did and
         * return the error.
         */
-       media_graph_walk_start(graph, entity_err);
 
-       while ((entity_err = media_graph_walk_next(graph))) {
-               entity_err->pipe = NULL;
-
-               /*
-                * We haven't started entities further than this so we quit
-                * here.
-                */
-               if (entity_err == entity)
+       list_for_each_entry(err_ppad, &pipe->pads, list) {
+               if (err_ppad == ppad)
                        break;
+
+               err_ppad->pad->pipe = NULL;
        }
 
-       media_graph_walk_cleanup(graph);
+       media_pipeline_cleanup(pipe);
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(__media_pipeline_start);
 
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
                                      struct media_pipeline *pipe)
 {
-       struct media_device *mdev = entity->graph_obj.mdev;
+       struct media_device *mdev = pad->entity->graph_obj.mdev;
        int ret;
 
        mutex_lock(&mdev->graph_mutex);
-       ret = __media_pipeline_start(entity, pipe);
+       ret = __media_pipeline_start(pad, pipe);
        mutex_unlock(&mdev->graph_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(media_pipeline_start);
 
-void __media_pipeline_stop(struct media_entity *entity)
+void __media_pipeline_stop(struct media_pad *pad)
 {
-       struct media_graph *graph = &entity->pipe->graph;
-       struct media_pipeline *pipe = entity->pipe;
+       struct media_pipeline *pipe = pad->pipe;
+       struct media_pipeline_pad *ppad;
 
        /*
         * If the following check fails, the driver has performed an
@@ -552,29 +873,65 @@ void __media_pipeline_stop(struct media_entity *entity)
        if (WARN_ON(!pipe))
                return;
 
-       if (--pipe->streaming_count)
+       if (--pipe->start_count)
                return;
 
-       media_graph_walk_start(graph, entity);
-
-       while ((entity = media_graph_walk_next(graph)))
-               entity->pipe = NULL;
+       list_for_each_entry(ppad, &pipe->pads, list)
+               ppad->pad->pipe = NULL;
 
-       media_graph_walk_cleanup(graph);
+       media_pipeline_cleanup(pipe);
 
+       if (pipe->allocated)
+               kfree(pipe);
 }
 EXPORT_SYMBOL_GPL(__media_pipeline_stop);
 
-void media_pipeline_stop(struct media_entity *entity)
+void media_pipeline_stop(struct media_pad *pad)
 {
-       struct media_device *mdev = entity->graph_obj.mdev;
+       struct media_device *mdev = pad->entity->graph_obj.mdev;
 
        mutex_lock(&mdev->graph_mutex);
-       __media_pipeline_stop(entity);
+       __media_pipeline_stop(pad);
        mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_pipeline_stop);
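
As an illustration only, the pad-based calls above pair up in a capture driver roughly as sketched below; my_video, its vdev and pipe fields, and the my_hw_start()/my_hw_stop() helpers are assumptions, not part of the patch:

	/* Hypothetical capture context; only the fields used here are shown. */
	struct my_video {
		struct video_device vdev;
		struct media_pipeline pipe;
	};

	static int my_start_streaming(struct my_video *video)
	{
		struct media_pad *pad = &video->vdev.entity.pads[0];
		int ret;

		ret = media_pipeline_start(pad, &video->pipe);
		if (ret)
			return ret;

		ret = my_hw_start(video);			/* hypothetical */
		if (ret)
			media_pipeline_stop(pad);

		return ret;
	}

	static void my_stop_streaming(struct my_video *video)
	{
		my_hw_stop(video);				/* hypothetical */
		media_pipeline_stop(&video->vdev.entity.pads[0]);
	}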
 
+__must_check int media_pipeline_alloc_start(struct media_pad *pad)
+{
+       struct media_device *mdev = pad->entity->graph_obj.mdev;
+       struct media_pipeline *new_pipe = NULL;
+       struct media_pipeline *pipe;
+       int ret;
+
+       mutex_lock(&mdev->graph_mutex);
+
+       /*
+        * Is the pad already part of a pipeline? If not, we need to allocate
+        * a pipe.
+        */
+       pipe = media_pad_pipeline(pad);
+       if (!pipe) {
+               new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL);
+               if (!new_pipe) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               pipe = new_pipe;
+               pipe->allocated = true;
+       }
+
+       ret = __media_pipeline_start(pad, pipe);
+       if (ret)
+               kfree(new_pipe);
+
+out:
+       mutex_unlock(&mdev->graph_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(media_pipeline_alloc_start);
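
As an illustration only, a minimal pairing for the allocating variant above; since the pipe allocated here is freed by __media_pipeline_stop() once start_count drops to zero, the caller only needs the matching stop call (pad stands for the driver's starting pad):

	ret = media_pipeline_alloc_start(pad);
	if (ret)
		return ret;

	/* ... stream ... */

	/* Drops start_count and frees the pipe that alloc_start() created. */
	media_pipeline_stop(pad);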
+
 /* -----------------------------------------------------------------------------
  * Links management
  */
@@ -829,7 +1186,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
 {
        const u32 mask = MEDIA_LNK_FL_ENABLED;
        struct media_device *mdev;
-       struct media_entity *source, *sink;
+       struct media_pad *source, *sink;
        int ret = -EBUSY;
 
        if (link == NULL)
@@ -845,12 +1202,11 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
        if (link->flags == flags)
                return 0;
 
-       source = link->source->entity;
-       sink = link->sink->entity;
+       source = link->source;
+       sink = link->sink;
 
        if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
-           (media_entity_is_streaming(source) ||
-            media_entity_is_streaming(sink)))
+           (media_pad_is_streaming(source) || media_pad_is_streaming(sink)))
                return -EBUSY;
 
        mdev = source->graph_obj.mdev;
@@ -991,6 +1347,60 @@ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad)
 }
 EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique);
 
+int media_entity_get_fwnode_pad(struct media_entity *entity,
+                               struct fwnode_handle *fwnode,
+                               unsigned long direction_flags)
+{
+       struct fwnode_endpoint endpoint;
+       unsigned int i;
+       int ret;
+
+       if (!entity->ops || !entity->ops->get_fwnode_pad) {
+               for (i = 0; i < entity->num_pads; i++) {
+                       if (entity->pads[i].flags & direction_flags)
+                               return i;
+               }
+
+               return -ENXIO;
+       }
+
+       ret = fwnode_graph_parse_endpoint(fwnode, &endpoint);
+       if (ret)
+               return ret;
+
+       ret = entity->ops->get_fwnode_pad(entity, &endpoint);
+       if (ret < 0)
+               return ret;
+
+       if (ret >= entity->num_pads)
+               return -ENXIO;
+
+       if (!(entity->pads[ret].flags & direction_flags))
+               return -ENXIO;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
+
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity)
+{
+       struct media_pad *pad;
+
+       media_entity_for_each_pad(entity, pad) {
+               if (pad->pipe)
+                       return pad->pipe;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(media_entity_pipeline);
+
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad)
+{
+       return pad->pipe;
+}
+EXPORT_SYMBOL_GPL(media_pad_pipeline);
+
 static void media_interface_init(struct media_device *mdev,
                                 struct media_interface *intf,
                                 u32 gobj_type,
index d335864..ee6e711 100644 (file)
@@ -339,7 +339,7 @@ void cx18_av_std_setup(struct cx18 *cx)
 
                /*
                 * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is
-                * is 864 pixels = 720 active + 144 blanking.  ITU-R BT.601
+                * 864 pixels = 720 active + 144 blanking.  ITU-R BT.601
                 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after
                 * the end of active video to start a horizontal line, so that
                 * leaves 132 pixels of hblank to ignore.
@@ -399,7 +399,7 @@ void cx18_av_std_setup(struct cx18 *cx)
 
                /*
                 * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is
-                * is 858 pixels = 720 active + 138 blanking.  The Hsync leading
+                * 858 pixels = 720 active + 138 blanking.  The Hsync leading
                 * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the
                 * end of active video, leaving 122 pixels of hblank to ignore
                 * before active video starts.
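
As a quick cross-check of the arithmetic in the two comments above: for PAL/SECAM, 864 - 720 = 144 blanking pixels, minus the ~12-pixel (0.9 us x 13.5 Mpps) BT.601 start-of-line offset, leaves 132 pixels of hblank to ignore; for NTSC, 858 - 720 = 138 blanking pixels, minus the ~16-pixel (1.2 us x 13.5 Mpps) Hsync offset, leaves 122 pixels.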
index ce0ef0b..a04a1d3 100644 (file)
@@ -586,7 +586,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
 {
        struct i2c_board_info info;
        static const unsigned short default_addr_list[] = {
-               0x18, 0x6b, 0x71,
+               0x18, 0x33, 0x6b, 0x71,
                I2C_CLIENT_END
        };
        static const unsigned short pvr2000_addr_list[] = {
index b509c2a..c0ef03e 100644 (file)
@@ -1388,6 +1388,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
        }
                fallthrough;
        case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
+       case CX88_BOARD_NOTONLYTV_LV3H:
                request_module("ir-kbd-i2c");
        }
 
index a3fe547..390bd5e 100644 (file)
@@ -989,7 +989,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
                return r;
        }
 
-       r = media_pipeline_start(&q->vdev.entity, &q->pipe);
+       r = video_device_pipeline_start(&q->vdev, &q->pipe);
        if (r)
                goto fail_pipeline;
 
@@ -1009,7 +1009,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
 fail_csi2_subdev:
        cio2_hw_exit(cio2, q);
 fail_hw:
-       media_pipeline_stop(&q->vdev.entity);
+       video_device_pipeline_stop(&q->vdev);
 fail_pipeline:
        dev_dbg(dev, "failed to start streaming (%d)\n", r);
        cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
@@ -1030,7 +1030,7 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
        cio2_hw_exit(cio2, q);
        synchronize_irq(cio2->pci_dev->irq);
        cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
-       media_pipeline_stop(&q->vdev.entity);
+       video_device_pipeline_stop(&q->vdev);
        pm_runtime_put(dev);
        cio2->streaming = false;
 }
index 8a3eed9..b779e0b 100644 (file)
@@ -603,6 +603,10 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
                inst->workqueue = NULL;
        }
 
+       if (inst->fh.m2m_ctx) {
+               v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+               inst->fh.m2m_ctx = NULL;
+       }
        v4l2_ctrl_handler_free(&inst->ctrl_handler);
        mutex_destroy(&inst->lock);
        v4l2_fh_del(&inst->fh);
@@ -685,13 +689,6 @@ int vpu_v4l2_close(struct file *file)
 
        vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);
 
-       vpu_inst_lock(inst);
-       if (inst->fh.m2m_ctx) {
-               v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
-               inst->fh.m2m_ctx = NULL;
-       }
-       vpu_inst_unlock(inst);
-
        call_void_vop(inst, release);
        vpu_inst_unregister(inst);
        vpu_inst_put(inst);
index a0b22b0..435e703 100644 (file)
@@ -421,7 +421,7 @@ static inline void coda9_jpeg_write_huff_values(struct coda_dev *dev, u8 *bits,
                coda_write(dev, (s32)values[i], CODA9_REG_JPEG_HUFF_DATA);
 }
 
-static int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx)
+static void coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx)
 {
        struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab;
        struct coda_dev *dev = ctx->dev;
@@ -455,7 +455,6 @@ static int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx)
        coda9_jpeg_write_huff_values(dev, huff_tab->luma_ac, 162);
        coda9_jpeg_write_huff_values(dev, huff_tab->chroma_ac, 162);
        coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_CTRL);
-       return 0;
 }
 
 static inline void coda9_jpeg_write_qmat_tab(struct coda_dev *dev,
@@ -1394,14 +1393,8 @@ static int coda9_jpeg_prepare_decode(struct coda_ctx *ctx)
        coda_write(dev, ctx->params.jpeg_restart_interval,
                        CODA9_REG_JPEG_RST_INTVAL);
 
-       if (ctx->params.jpeg_huff_tab) {
-               ret = coda9_jpeg_dec_huff_setup(ctx);
-               if (ret < 0) {
-                       v4l2_err(&dev->v4l2_dev,
-                                "failed to set up Huffman tables: %d\n", ret);
-                       return ret;
-               }
-       }
+       if (ctx->params.jpeg_huff_tab)
+               coda9_jpeg_dec_huff_setup(ctx);
 
        coda9_jpeg_qmat_setup(ctx);
 
index 29f6c1c..86c0546 100644 (file)
@@ -457,7 +457,7 @@ err_cmdq_data:
        kfree(path);
        atomic_dec(&mdp->job_count);
        wake_up(&mdp->callback_wq);
-       if (cmd->pkt.buf_size > 0)
+       if (cmd && cmd->pkt.buf_size > 0)
                mdp_cmdq_pkt_destroy(&cmd->pkt);
        kfree(comps);
        kfree(cmd);
index e62abf3..d3eaf88 100644 (file)
@@ -682,7 +682,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
        int i, ret;
 
        if (comp->comp_dev) {
-               ret = pm_runtime_get_sync(comp->comp_dev);
+               ret = pm_runtime_resume_and_get(comp->comp_dev);
                if (ret < 0) {
                        dev_err(dev,
                                "Failed to get power, err %d. type:%d id:%d\n",
@@ -699,6 +699,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
                        dev_err(dev,
                                "Failed to enable clk %d. type:%d id:%d\n",
                                i, comp->type, comp->id);
+                       pm_runtime_put(comp->comp_dev);
                        return ret;
                }
        }
@@ -869,7 +870,7 @@ static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
 
        ret = mdp_comp_init(mdp, node, comp, id);
        if (ret) {
-               kfree(comp);
+               devm_kfree(dev, comp);
                return ERR_PTR(ret);
        }
        mdp->comp[id] = comp;
@@ -930,7 +931,7 @@ void mdp_comp_destroy(struct mdp_dev *mdp)
                if (mdp->comp[i]) {
                        pm_runtime_disable(mdp->comp[i]->comp_dev);
                        mdp_comp_deinit(mdp->comp[i]);
-                       kfree(mdp->comp[i]);
+                       devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]);
                        mdp->comp[i] = NULL;
                }
        }
index cde5957..c413e59 100644 (file)
@@ -289,7 +289,8 @@ err_deinit_comp:
        mdp_comp_destroy(mdp);
 err_return:
        for (i = 0; i < MDP_PIPE_MAX; i++)
-               mtk_mutex_put(mdp->mdp_mutex[i]);
+               if (mdp)
+                       mtk_mutex_put(mdp->mdp_mutex[i]);
        kfree(mdp);
        dev_dbg(dev, "Errno %d\n", ret);
        return ret;
index 9f58443..a72bed9 100644 (file)
@@ -173,7 +173,8 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
        /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */
 
        mem_size = vpu_alloc_size;
-       if (mdp_vpu_shared_mem_alloc(vpu)) {
+       err = mdp_vpu_shared_mem_alloc(vpu);
+       if (err) {
                dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
                goto err_mem_alloc;
        }
index b3b0577..f6d48c3 100644 (file)
@@ -373,7 +373,7 @@ static const struct v4l2_ctrl_ops dw100_ctrl_ops = {
  * The coordinates are saved in UQ12.4 fixed point format.
  */
 static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl,
-                                         u32 from_idx, u32 elems,
+                                         u32 from_idx,
                                          union v4l2_ctrl_ptr ptr)
 {
        struct dw100_ctx *ctx =
@@ -398,7 +398,7 @@ static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl,
        ctx->map_height = mh;
        ctx->map_size = mh * mw * sizeof(u32);
 
-       for (idx = from_idx; idx < elems; idx++) {
+       for (idx = from_idx; idx < ctrl->elems; idx++) {
                qy = min_t(u32, (idx / mw) * qdy, qsh);
                qx = min_t(u32, (idx % mw) * qdx, qsw);
                map[idx] = dw100_map_format_coordinates(qx, qy);
index 290df04..81fb3a5 100644 (file)
@@ -493,7 +493,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
        struct v4l2_subdev *subdev;
        int ret;
 
-       ret = media_pipeline_start(&vdev->entity, &video->pipe);
+       ret = video_device_pipeline_start(vdev, &video->pipe);
        if (ret < 0)
                return ret;
 
@@ -522,7 +522,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
        return 0;
 
 error:
-       media_pipeline_stop(&vdev->entity);
+       video_device_pipeline_stop(vdev);
 
        video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
 
@@ -553,7 +553,7 @@ static void video_stop_streaming(struct vb2_queue *q)
                v4l2_subdev_call(subdev, video, s_stream, 0);
        }
 
-       media_pipeline_stop(&vdev->entity);
+       video_device_pipeline_stop(vdev);
 
        video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
 }
index 60de420..ab6a29f 100644 (file)
@@ -1800,7 +1800,7 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt)
        struct venus_core *core = inst->core;
        u32 fmt = to_hfi_raw_fmt(v4l2_pixfmt);
        struct hfi_plat_caps *caps;
-       u32 buftype;
+       bool found;
 
        if (!fmt)
                return false;
@@ -1809,12 +1809,13 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt)
        if (!caps)
                return false;
 
-       if (inst->session_type == VIDC_SESSION_TYPE_DEC)
-               buftype = HFI_BUFFER_OUTPUT2;
-       else
-               buftype = HFI_BUFFER_OUTPUT;
+       found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
+       if (found)
+               goto done;
 
-       return find_fmt_from_caps(caps, buftype, fmt);
+       found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+done:
+       return found;
 }
 EXPORT_SYMBOL_GPL(venus_helper_check_format);
 
index 1968f09..e00aedb 100644 (file)
@@ -569,8 +569,6 @@ irqreturn_t hfi_isr(int irq, void *dev)
 
 int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops)
 {
-       int ret;
-
        if (!ops)
                return -EINVAL;
 
@@ -579,9 +577,8 @@ int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops)
        core->state = CORE_UNINIT;
        init_completion(&core->done);
        pkt_set_version(core->res->hfi_version);
-       ret = venus_hfi_create(core);
 
-       return ret;
+       return venus_hfi_create(core);
 }
 
 void hfi_destroy(struct venus_core *core)
index ac0bb45..4ceaba3 100644 (file)
@@ -183,6 +183,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
                else
                        return NULL;
                fmt = find_format(inst, pixmp->pixelformat, f->type);
+               if (!fmt)
+                       return NULL;
        }
 
        pixmp->width = clamp(pixmp->width, frame_width_min(inst),
index 86918ae..cdb1254 100644 (file)
@@ -192,10 +192,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
        pixmp->height = clamp(pixmp->height, frame_height_min(inst),
                              frame_height_max(inst));
 
-       if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-               pixmp->width = ALIGN(pixmp->width, 128);
-               pixmp->height = ALIGN(pixmp->height, 32);
-       }
+       pixmp->width = ALIGN(pixmp->width, 128);
+       pixmp->height = ALIGN(pixmp->height, 32);
 
        pixmp->width = ALIGN(pixmp->width, 2);
        pixmp->height = ALIGN(pixmp->height, 2);
@@ -392,7 +390,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
        struct v4l2_fract *timeperframe = &out->timeperframe;
        u64 us_per_frame, fps;
 
-       if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+       if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                return -EINVAL;
 
@@ -424,7 +422,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
 {
        struct venus_inst *inst = to_inst(file);
 
-       if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+       if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                return -EINVAL;
 
@@ -509,6 +507,19 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
        return 0;
 }
 
+static int venc_subscribe_event(struct v4l2_fh *fh,
+                               const struct v4l2_event_subscription *sub)
+{
+       switch (sub->type) {
+       case V4L2_EVENT_EOS:
+               return v4l2_event_subscribe(fh, sub, 2, NULL);
+       case V4L2_EVENT_CTRL:
+               return v4l2_ctrl_subscribe_event(fh, sub);
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct v4l2_ioctl_ops venc_ioctl_ops = {
        .vidioc_querycap = venc_querycap,
        .vidioc_enum_fmt_vid_cap = venc_enum_fmt,
@@ -534,8 +545,9 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = {
        .vidioc_g_parm = venc_g_parm,
        .vidioc_enum_framesizes = venc_enum_framesizes,
        .vidioc_enum_frameintervals = venc_enum_frameintervals,
-       .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+       .vidioc_subscribe_event = venc_subscribe_event,
        .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+       .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
 };
 
 static int venc_pm_get(struct venus_inst *inst)
@@ -686,7 +698,8 @@ static int venc_set_properties(struct venus_inst *inst)
                        return ret;
        }
 
-       if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+       if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC &&
+           ctr->profile.hevc == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) {
                struct hfi_hdr10_pq_sei hdr10;
                unsigned int c;
 
index ed44e58..7468e43 100644 (file)
@@ -8,6 +8,7 @@
 
 #include "core.h"
 #include "venc.h"
+#include "helpers.h"
 
 #define BITRATE_MIN            32000
 #define BITRATE_MAX            160000000
@@ -336,8 +337,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
                 * if we disable 8x8 transform for HP.
                 */
 
-               if (ctrl->val == 0)
-                       return -EINVAL;
 
                ctr->h264_8x8_transform = ctrl->val;
                break;
@@ -348,15 +347,41 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
        return 0;
 }
 
+static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct venus_inst *inst = ctrl_to_inst(ctrl);
+       struct hfi_buffer_requirements bufreq;
+       enum hfi_version ver = inst->core->res->hfi_version;
+       int ret;
+
+       switch (ctrl->id) {
+       case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+               ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+               if (!ret)
+                       ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct v4l2_ctrl_ops venc_ctrl_ops = {
        .s_ctrl = venc_op_s_ctrl,
+       .g_volatile_ctrl = venc_op_g_volatile_ctrl,
 };
 
 int venc_ctrl_init(struct venus_inst *inst)
 {
        int ret;
+       struct v4l2_ctrl_hdr10_mastering_display p_hdr10_mastering = {
+               { 34000, 13250, 7500 },
+               { 16000, 34500, 3000 }, 15635, 16450, 10000000, 500,
+       };
+       struct v4l2_ctrl_hdr10_cll_info p_hdr10_cll = { 1000, 400 };
 
-       ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 58);
+       ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 59);
        if (ret)
                return ret;
 
@@ -437,6 +462,9 @@ int venc_ctrl_init(struct venus_inst *inst)
                0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
 
        v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+                         V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 4, 11, 1, 4);
+
+       v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
                BITRATE_STEP, BITRATE_DEFAULT);
 
@@ -579,11 +607,11 @@ int venc_ctrl_init(struct venus_inst *inst)
 
        v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
                                   V4L2_CID_COLORIMETRY_HDR10_CLL_INFO,
-                                  v4l2_ctrl_ptr_create(NULL));
+                                  v4l2_ctrl_ptr_create(&p_hdr10_cll));
 
        v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
                                   V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY,
-                                  v4l2_ctrl_ptr_create(NULL));
+                                  v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering));
 
        v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
                               V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE,
index 968a742..2f7daa8 100644 (file)
@@ -786,9 +786,8 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
                return 0;
 
        /*
-        * Don't allow link changes if any entity in the graph is
-        * streaming, modifying the CHSEL register fields can disrupt
-        * running streams.
+        * Don't allow link changes if any stream in the graph is active as
+        * modifying the CHSEL register fields can disrupt running streams.
         */
        media_device_for_each_entity(entity, &group->mdev)
                if (media_entity_is_streaming(entity))
index 8d37fbd..3aea96d 100644 (file)
@@ -1244,8 +1244,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
 
 static int rvin_set_stream(struct rvin_dev *vin, int on)
 {
-       struct media_pipeline *pipe;
-       struct media_device *mdev;
        struct v4l2_subdev *sd;
        struct media_pad *pad;
        int ret;
@@ -1265,7 +1263,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
        sd = media_entity_to_v4l2_subdev(pad->entity);
 
        if (!on) {
-               media_pipeline_stop(&vin->vdev.entity);
+               video_device_pipeline_stop(&vin->vdev);
                return v4l2_subdev_call(sd, video, s_stream, 0);
        }
 
@@ -1273,17 +1271,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
        if (ret)
                return ret;
 
-       /*
-        * The graph lock needs to be taken to protect concurrent
-        * starts of multiple VIN instances as they might share
-        * a common subdevice down the line and then should use
-        * the same pipe.
-        */
-       mdev = vin->vdev.entity.graph_obj.mdev;
-       mutex_lock(&mdev->graph_mutex);
-       pipe = sd->entity.pipe ? sd->entity.pipe : &vin->vdev.pipe;
-       ret = __media_pipeline_start(&vin->vdev.entity, pipe);
-       mutex_unlock(&mdev->graph_mutex);
+       ret = video_device_pipeline_alloc_start(&vin->vdev);
        if (ret)
                return ret;
 
@@ -1291,7 +1279,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
        if (ret == -ENOIOCTLCMD)
                ret = 0;
        if (ret)
-               media_pipeline_stop(&vin->vdev.entity);
+               video_device_pipeline_stop(&vin->vdev);
 
        return ret;
 }
index df1606b..9d24647 100644 (file)
@@ -927,7 +927,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
        }
        mutex_unlock(&pipe->lock);
 
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
        vsp1_video_release_buffers(video);
        vsp1_video_pipeline_put(pipe);
 }
@@ -1046,7 +1046,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
                return PTR_ERR(pipe);
        }
 
-       ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
+       ret = __video_device_pipeline_start(&video->video, &pipe->pipe);
        if (ret < 0) {
                mutex_unlock(&mdev->graph_mutex);
                goto err_pipe;
@@ -1070,7 +1070,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        return 0;
 
 err_stop:
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
 err_pipe:
        vsp1_video_pipeline_put(pipe);
        return ret;
index d5904c9..d454068 100644 (file)
@@ -913,7 +913,7 @@ static void rkisp1_cap_stream_disable(struct rkisp1_capture *cap)
  *
  * Call s_stream(false) in the reverse order from
  * rkisp1_pipeline_stream_enable() and disable the DMA engine.
- * Should be called before media_pipeline_stop()
+ * Should be called before video_device_pipeline_stop()
  */
 static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap)
        __must_hold(&cap->rkisp1->stream_lock)
@@ -926,7 +926,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap)
         * If the other capture is streaming, isp and sensor nodes shouldn't
         * be disabled, skip them.
         */
-       if (rkisp1->pipe.streaming_count < 2)
+       if (rkisp1->pipe.start_count < 2)
                v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false);
 
        v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream,
@@ -937,7 +937,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap)
  * rkisp1_pipeline_stream_enable - enable nodes in the pipeline
  *
  * Enable the DMA Engine and call s_stream(true) through the pipeline.
- * Should be called after media_pipeline_start()
+ * Should be called after video_device_pipeline_start()
  */
 static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap)
        __must_hold(&cap->rkisp1->stream_lock)
@@ -956,7 +956,7 @@ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap)
         * If the other capture is streaming, isp and sensor nodes are already
         * enabled, skip them.
         */
-       if (rkisp1->pipe.streaming_count > 1)
+       if (rkisp1->pipe.start_count > 1)
                return 0;
 
        ret = v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, true);
@@ -994,7 +994,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
 
        rkisp1_dummy_buf_destroy(cap);
 
-       media_pipeline_stop(&node->vdev.entity);
+       video_device_pipeline_stop(&node->vdev);
 
        mutex_unlock(&cap->rkisp1->stream_lock);
 }
@@ -1008,7 +1008,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
 
        mutex_lock(&cap->rkisp1->stream_lock);
 
-       ret = media_pipeline_start(entity, &cap->rkisp1->pipe);
+       ret = video_device_pipeline_start(&cap->vnode.vdev, &cap->rkisp1->pipe);
        if (ret) {
                dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret);
                goto err_ret_buffers;
@@ -1044,7 +1044,7 @@ err_pipe_pm_put:
 err_destroy_dummy:
        rkisp1_dummy_buf_destroy(cap);
 err_pipeline_stop:
-       media_pipeline_stop(entity);
+       video_device_pipeline_stop(&cap->vnode.vdev);
 err_ret_buffers:
        rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED);
        mutex_unlock(&cap->rkisp1->stream_lock);
@@ -1273,11 +1273,12 @@ static int rkisp1_capture_link_validate(struct media_link *link)
        struct rkisp1_capture *cap = video_get_drvdata(vdev);
        const struct rkisp1_capture_fmt_cfg *fmt =
                rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
-       struct v4l2_subdev_format sd_fmt;
+       struct v4l2_subdev_format sd_fmt = {
+               .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+               .pad = link->source->index,
+       };
        int ret;
 
-       sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-       sd_fmt.pad = link->source->index;
        ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
        if (ret)
                return ret;
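
The rkisp1_capture_link_validate() hunk directly above also switches struct v4l2_subdev_format over to a designated initializer. Besides being shorter, this zero-initializes every member that is not named explicitly, so the rest of sd_fmt no longer holds uninitialized stack data before get_fmt fills it in. Illustration of the C semantics only; the pad index 0 below is arbitrary:

	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	/* All members not listed above (format, reserved, ...) start as 0. */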
index 8056997..a1293c4 100644
@@ -378,6 +378,7 @@ struct rkisp1_params {
        struct v4l2_format vdev_fmt;
 
        enum v4l2_quantization quantization;
+       enum v4l2_ycbcr_encoding ycbcr_encoding;
        enum rkisp1_fmt_raw_pat_type raw_type;
 };
 
@@ -556,17 +557,32 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
  */
 const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code);
 
-/* rkisp1_params_configure - configure the params when stream starts.
- *                          This function is called by the isp entity upon stream starts.
- *                          The function applies the initial configuration of the parameters.
+/*
+ * rkisp1_params_pre_configure - Configure the params before stream start
  *
- * @params:      pointer to rkisp1_params.
+ * @params:      pointer to rkisp1_params
  * @bayer_pat:   the bayer pattern on the isp video sink pad
  * @quantization: the quantization configured on the isp's src pad
+ * @ycbcr_encoding: the ycbcr_encoding configured on the isp's src pad
+ *
+ * This function is called by the ISP entity just before the ISP gets started.
+ * It applies the initial ISP parameters from the first params buffer, but
+ * skips LSC as it needs to be configured after the ISP is started.
+ */
+void rkisp1_params_pre_configure(struct rkisp1_params *params,
+                                enum rkisp1_fmt_raw_pat_type bayer_pat,
+                                enum v4l2_quantization quantization,
+                                enum v4l2_ycbcr_encoding ycbcr_encoding);
+
+/*
+ * rkisp1_params_post_configure - Configure the params after stream start
+ *
+ * @params:      pointer to rkisp1_params
+ *
+ * This function is called by the ISP entity just after the ISP gets started.
+ * It applies the initial ISP LSC parameters from the first params buffer.
  */
-void rkisp1_params_configure(struct rkisp1_params *params,
-                            enum rkisp1_fmt_raw_pat_type bayer_pat,
-                            enum v4l2_quantization quantization);
+void rkisp1_params_post_configure(struct rkisp1_params *params);
 
 /* rkisp1_params_disable - disable all parameters.
  *                        This function is called by the isp entity upon stream start
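
The rkisp1-common.h changes above replace the single rkisp1_params_configure() entry point with a pre/post pair so that LSC programming can be deferred until the ISP core is running. A hedged sketch of the intended ordering only; the real call sites are rkisp1_config_isp() and rkisp1_isp_start() in the next file's hunks, and error handling is omitted here:

	/* Before the ISP is enabled: everything except LSC. */
	rkisp1_params_pre_configure(&rkisp1->params, bayer_pat,
				    quantization, ycbcr_encoding);

	/* ... RKISP1_CIF_ISP_CTRL_ISP_ENABLE is set by the ISP start ... */

	/* After the ISP is enabled: LSC from the first params buffer. */
	rkisp1_params_post_configure(&rkisp1->params);

In the isp.c hunk that follows, the post step is additionally skipped when the ISP source format stays Bayer, i.e. in pass-through mode.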
index 383a3ec..585cf3f 100644
@@ -231,10 +231,11 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp,
                struct v4l2_mbus_framefmt *src_frm;
 
                src_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
-                                                RKISP1_ISP_PAD_SINK_VIDEO,
+                                                RKISP1_ISP_PAD_SOURCE_VIDEO,
                                                 V4L2_SUBDEV_FORMAT_ACTIVE);
-               rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat,
-                                       src_frm->quantization);
+               rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
+                                           src_frm->quantization,
+                                           src_frm->ycbcr_enc);
        }
 
        return 0;
@@ -340,6 +341,9 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp)
               RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
               RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
        rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
+
+       if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER)
+               rkisp1_params_post_configure(&rkisp1->params);
 }
 
 /* ----------------------------------------------------------------------------
@@ -431,12 +435,17 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
        struct v4l2_rect *sink_crop, *src_crop;
 
+       /* Video. */
        sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
                                              RKISP1_ISP_PAD_SINK_VIDEO);
        sink_fmt->width = RKISP1_DEFAULT_WIDTH;
        sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
        sink_fmt->field = V4L2_FIELD_NONE;
        sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
+       sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+       sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+       sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
 
        sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
                                             RKISP1_ISP_PAD_SINK_VIDEO);
@@ -449,11 +458,16 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
                                             RKISP1_ISP_PAD_SOURCE_VIDEO);
        *src_fmt = *sink_fmt;
        src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
+       src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+       src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+       src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
 
        src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
        *src_crop = *sink_crop;
 
+       /* Parameters and statistics. */
        sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
                                              RKISP1_ISP_PAD_SINK_PARAMS);
        src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
@@ -472,40 +486,105 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
                                   struct v4l2_mbus_framefmt *format,
                                   unsigned int which)
 {
-       const struct rkisp1_mbus_info *mbus_info;
+       const struct rkisp1_mbus_info *sink_info;
+       const struct rkisp1_mbus_info *src_info;
+       struct v4l2_mbus_framefmt *sink_fmt;
        struct v4l2_mbus_framefmt *src_fmt;
        const struct v4l2_rect *src_crop;
+       bool set_csc;
 
+       sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
+                                         RKISP1_ISP_PAD_SINK_VIDEO, which);
        src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
                                         RKISP1_ISP_PAD_SOURCE_VIDEO, which);
        src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
                                           RKISP1_ISP_PAD_SOURCE_VIDEO, which);
 
+       /*
+        * Media bus code. The ISP can operate in pass-through mode (Bayer in,
+        * Bayer out or YUV in, YUV out) or process Bayer data to YUV, but
+        * can't convert from YUV to Bayer.
+        */
+       sink_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
+
        src_fmt->code = format->code;
-       mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
-       if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) {
+       src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
+       if (!src_info || !(src_info->direction & RKISP1_ISP_SD_SRC)) {
                src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
-               mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
+               src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
        }
-       if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               isp->src_fmt = mbus_info;
+
+       if (sink_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
+           src_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+               src_fmt->code = sink_fmt->code;
+               src_info = sink_info;
+       }
+
+       /*
+        * The source width and height must be identical to the source crop
+        * size.
+        */
        src_fmt->width  = src_crop->width;
        src_fmt->height = src_crop->height;
 
        /*
-        * The CSC API is used to allow userspace to force full
-        * quantization on YUV formats.
+        * Copy the color space for the sink pad. When converting from Bayer to
+        * YUV, default to a limited quantization range.
         */
-       if (format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC &&
-           format->quantization == V4L2_QUANTIZATION_FULL_RANGE &&
-           mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
-               src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-       else if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
+       src_fmt->colorspace = sink_fmt->colorspace;
+       src_fmt->xfer_func = sink_fmt->xfer_func;
+       src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc;
+
+       if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER &&
+           src_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
                src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
        else
-               src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+               src_fmt->quantization = sink_fmt->quantization;
+
+       /*
+        * Allow setting the source color space fields when the SET_CSC flag is
+        * set and the source format is YUV. If the sink format is YUV, don't
+        * set the color primaries, transfer function or YCbCr encoding as the
+        * ISP is bypassed in that case and passes YUV data through without
+        * modifications.
+        *
+        * The color primaries and transfer function are configured through the
+        * cross-talk matrix and tone curve respectively. Settings for those
+        * hardware blocks are conveyed through the ISP parameters buffer, as
+        * they need to combine color space information with other image tuning
+        * characteristics and can't thus be computed by the kernel based on the
+        * color space. The source pad colorspace and xfer_func fields are thus
+        * ignored by the driver, but can be set by userspace to propagate
+        * accurate color space information down the pipeline.
+        */
+       set_csc = format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC;
+
+       if (set_csc && src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) {
+               if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+                       if (format->colorspace != V4L2_COLORSPACE_DEFAULT)
+                               src_fmt->colorspace = format->colorspace;
+                       if (format->xfer_func != V4L2_XFER_FUNC_DEFAULT)
+                               src_fmt->xfer_func = format->xfer_func;
+                       if (format->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT)
+                               src_fmt->ycbcr_enc = format->ycbcr_enc;
+               }
+
+               if (format->quantization != V4L2_QUANTIZATION_DEFAULT)
+                       src_fmt->quantization = format->quantization;
+       }
 
        *format = *src_fmt;
+
+       /*
+        * Restore the SET_CSC flag if it was set to indicate support for the
+        * CSC setting API.
+        */
+       if (set_csc)
+               format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+
+       /* Store the source format info when setting the active format. */
+       if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+               isp->src_fmt = src_info;
 }
 
 static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
@@ -573,6 +652,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt;
        struct v4l2_rect *sink_crop;
+       bool is_yuv;
 
        sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
                                          RKISP1_ISP_PAD_SINK_VIDEO,
@@ -593,6 +673,36 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
                                   RKISP1_ISP_MIN_HEIGHT,
                                   RKISP1_ISP_MAX_HEIGHT);
 
+       /*
+        * Adjust the color space fields. Accept any color primaries and
+        * transfer function for both YUV and Bayer. For YUV any YCbCr encoding
+        * and quantization range is also accepted. For Bayer formats, the YCbCr
+        * encoding isn't applicable, and the quantization range can only be
+        * full.
+        */
+       is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV;
+
+       sink_fmt->colorspace = format->colorspace ? :
+                              (is_yuv ? V4L2_COLORSPACE_SRGB :
+                               V4L2_COLORSPACE_RAW);
+       sink_fmt->xfer_func = format->xfer_func ? :
+                             V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace);
+       if (is_yuv) {
+               sink_fmt->ycbcr_enc = format->ycbcr_enc ? :
+                       V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace);
+               sink_fmt->quantization = format->quantization ? :
+                       V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace,
+                                                     sink_fmt->ycbcr_enc);
+       } else {
+               /*
+                * The YCbCr encoding isn't applicable for non-YUV formats, but
+                * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it
+                * should be ignored by userspace.
+                */
+               sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+               sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       }
+
        *format = *sink_fmt;
 
        /* Propagate to in crop */
index 9da7dc1..d8731eb 100644
@@ -18,6 +18,8 @@
 #define RKISP1_ISP_PARAMS_REQ_BUFS_MIN 2
 #define RKISP1_ISP_PARAMS_REQ_BUFS_MAX 8
 
+#define RKISP1_ISP_DPCC_METHODS_SET(n) \
+                       (RKISP1_CIF_ISP_DPCC_METHODS_SET_1 + 0x4 * (n))
 #define RKISP1_ISP_DPCC_LINE_THRESH(n) \
                        (RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n))
 #define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \
@@ -56,39 +58,47 @@ static void rkisp1_dpcc_config(struct rkisp1_params *params,
        unsigned int i;
        u32 mode;
 
-       /* avoid to override the old enable value */
+       /*
+        * The enable bit is controlled in rkisp1_isp_isr_other_config() and
+        * must be preserved. The grayscale mode should be configured
+        * automatically based on the media bus code on the ISP sink pad, so
+        * only the STAGE1_ENABLE bit can be set by userspace.
+        */
        mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE);
-       mode &= RKISP1_CIF_ISP_DPCC_ENA;
-       mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA;
+       mode &= RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE;
+       mode |= arg->mode & RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE, mode);
+
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_OUTPUT_MODE,
-                    arg->output_mode);
+                    arg->output_mode & RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK);
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_SET_USE,
-                    arg->set_use);
-
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_1,
-                    arg->methods[0].method);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_2,
-                    arg->methods[1].method);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_3,
-                    arg->methods[2].method);
+                    arg->set_use & RKISP1_CIF_ISP_DPCC_SET_USE_MASK);
+
        for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) {
+               rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_METHODS_SET(i),
+                            arg->methods[i].method &
+                            RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK);
                rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_THRESH(i),
-                            arg->methods[i].line_thresh);
+                            arg->methods[i].line_thresh &
+                            RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK);
                rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_MAD_FAC(i),
-                            arg->methods[i].line_mad_fac);
+                            arg->methods[i].line_mad_fac &
+                            RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK);
                rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_PG_FAC(i),
-                            arg->methods[i].pg_fac);
+                            arg->methods[i].pg_fac &
+                            RKISP1_CIF_ISP_DPCC_PG_FAC_MASK);
                rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RND_THRESH(i),
-                            arg->methods[i].rnd_thresh);
+                            arg->methods[i].rnd_thresh &
+                            RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK);
                rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RG_FAC(i),
-                            arg->methods[i].rg_fac);
+                            arg->methods[i].rg_fac &
+                            RKISP1_CIF_ISP_DPCC_RG_FAC_MASK);
        }
 
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RND_OFFS,
-                    arg->rnd_offs);
+                    arg->rnd_offs & RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK);
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RO_LIMITS,
-                    arg->ro_limits);
+                    arg->ro_limits & RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK);
 }
 
 /* ISP black level subtraction interface function */
@@ -188,149 +198,131 @@ static void
 rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
                             const struct rkisp1_cif_isp_lsc_config *pconfig)
 {
-       unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+       struct rkisp1_device *rkisp1 = params->rkisp1;
+       u32 lsc_status, sram_addr, lsc_table_sel;
+       unsigned int i, j;
 
-       isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+       lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
 
        /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
-       sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+       sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
                    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
                    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
 
        /* program data tables (table size is 9 * 17 = 153) */
        for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
+               const __u16 *r_tbl = pconfig->r_data_tbl[i];
+               const __u16 *gr_tbl = pconfig->gr_data_tbl[i];
+               const __u16 *gb_tbl = pconfig->gb_data_tbl[i];
+               const __u16 *b_tbl = pconfig->b_data_tbl[i];
+
                /*
                 * 17 sectors with 2 values in one DWORD = 9
                 * DWORDs (2nd value of last DWORD unused)
                 */
                for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j],
-                                                                pconfig->r_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j],
-                                                                pconfig->gr_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j],
-                                                                pconfig->gb_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j],
-                                                                pconfig->b_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data);
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
+                                       r_tbl[j], r_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
+                                       gr_tbl[j], gr_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
+                                       gb_tbl[j], gb_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
+                                       b_tbl[j], b_tbl[j + 1]));
                }
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
-                            data);
 
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
-                            data);
-
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
-                            data);
-
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
-                            data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(r_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gr_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gb_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(b_tbl[j], 0));
        }
-       isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
-                           RKISP1_CIF_ISP_LSC_TABLE_0 :
-                           RKISP1_CIF_ISP_LSC_TABLE_1;
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL,
-                    isp_lsc_table_sel);
+
+       lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
+                       RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1;
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel);
 }
 
 static void
 rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
                             const struct rkisp1_cif_isp_lsc_config *pconfig)
 {
-       unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+       struct rkisp1_device *rkisp1 = params->rkisp1;
+       u32 lsc_status, sram_addr, lsc_table_sel;
+       unsigned int i, j;
 
-       isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+       lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
 
        /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
-       sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
-                    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
-                    RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
+       sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
+                   RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+                   RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
 
        /* program data tables (table size is 9 * 17 = 153) */
        for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
+               const __u16 *r_tbl = pconfig->r_data_tbl[i];
+               const __u16 *gr_tbl = pconfig->gr_data_tbl[i];
+               const __u16 *gb_tbl = pconfig->gb_data_tbl[i];
+               const __u16 *b_tbl = pconfig->b_data_tbl[i];
+
                /*
                 * 17 sectors with 2 values in one DWORD = 9
                 * DWORDs (2nd value of last DWORD unused)
                 */
                for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
-                                       pconfig->r_data_tbl[i][j],
-                                       pconfig->r_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
-                                       pconfig->gr_data_tbl[i][j],
-                                       pconfig->gr_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
-                                       pconfig->gb_data_tbl[i][j],
-                                       pconfig->gb_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data);
-
-                       data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
-                                       pconfig->b_data_tbl[i][j],
-                                       pconfig->b_data_tbl[i][j + 1]);
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data);
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+                                       r_tbl[j], r_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+                                       gr_tbl[j], gr_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+                                       gb_tbl[j], gb_tbl[j + 1]));
+                       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+                                    RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+                                       b_tbl[j], b_tbl[j + 1]));
                }
 
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
-                            data);
-
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
-                            data);
-
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
-                            data);
-
-               data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0);
-               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
-                            data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(r_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gr_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gb_tbl[j], 0));
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
+                            RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(b_tbl[j], 0));
        }
-       isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
-                           RKISP1_CIF_ISP_LSC_TABLE_0 :
-                           RKISP1_CIF_ISP_LSC_TABLE_1;
-       rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL,
-                    isp_lsc_table_sel);
+
+       lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
+                       RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1;
+       rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel);
 }
 
 static void rkisp1_lsc_config(struct rkisp1_params *params,
                              const struct rkisp1_cif_isp_lsc_config *arg)
 {
-       unsigned int i, data;
-       u32 lsc_ctrl;
+       struct rkisp1_device *rkisp1 = params->rkisp1;
+       u32 lsc_ctrl, data;
+       unsigned int i;
 
        /* The LSC block must be disabled while it is configured; store the current status first */
-       lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
+       lsc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
        rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
                                RKISP1_CIF_ISP_LSC_CTRL_ENA);
        params->ops->lsc_matrix_config(params, arg);
@@ -339,38 +331,31 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
                /* program x size tables */
                data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
                                                    arg->x_size_tbl[i * 2 + 1]);
-               rkisp1_write(params->rkisp1,
-                            RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4, data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XSIZE(i), data);
 
                /* program x grad tables */
-               data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2],
+               data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->x_grad_tbl[i * 2],
                                                    arg->x_grad_tbl[i * 2 + 1]);
-               rkisp1_write(params->rkisp1,
-                            RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4, data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XGRAD(i), data);
 
                /* program y size tables */
                data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2],
                                                    arg->y_size_tbl[i * 2 + 1]);
-               rkisp1_write(params->rkisp1,
-                            RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4, data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YSIZE(i), data);
 
                /* program y grad tables */
-               data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2],
+               data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->y_grad_tbl[i * 2],
                                                    arg->y_grad_tbl[i * 2 + 1]);
-               rkisp1_write(params->rkisp1,
-                            RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4, data);
+               rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YGRAD(i), data);
        }
 
        /* restore the lsc ctrl status */
-       if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) {
-               rkisp1_param_set_bits(params,
-                                     RKISP1_CIF_ISP_LSC_CTRL,
+       if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA)
+               rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
                                      RKISP1_CIF_ISP_LSC_CTRL_ENA);
-       } else {
-               rkisp1_param_clear_bits(params,
-                                       RKISP1_CIF_ISP_LSC_CTRL,
+       else
+               rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
                                        RKISP1_CIF_ISP_LSC_CTRL_ENA);
-       }
 }
 
 /* ISP Filtering function */
@@ -1066,39 +1051,96 @@ static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
        }
 }
 
-static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
+static void rkisp1_csm_config(struct rkisp1_params *params)
 {
-       static const u16 full_range_coeff[] = {
-               0x0026, 0x004b, 0x000f,
-               0x01ea, 0x01d6, 0x0040,
-               0x0040, 0x01ca, 0x01f6
+       struct csm_coeffs {
+               u16 limited[9];
+               u16 full[9];
+       };
+       static const struct csm_coeffs rec601_coeffs = {
+               .limited = {
+                       0x0021, 0x0042, 0x000d,
+                       0x01ed, 0x01db, 0x0038,
+                       0x0038, 0x01d1, 0x01f7,
+               },
+               .full = {
+                       0x0026, 0x004b, 0x000f,
+                       0x01ea, 0x01d6, 0x0040,
+                       0x0040, 0x01ca, 0x01f6,
+               },
        };
-       static const u16 limited_range_coeff[] = {
-               0x0021, 0x0040, 0x000d,
-               0x01ed, 0x01db, 0x0038,
-               0x0038, 0x01d1, 0x01f7,
+       static const struct csm_coeffs rec709_coeffs = {
+               .limited = {
+                       0x0018, 0x0050, 0x0008,
+                       0x01f3, 0x01d5, 0x0038,
+                       0x0038, 0x01cd, 0x01fb,
+               },
+               .full = {
+                       0x001b, 0x005c, 0x0009,
+                       0x01f1, 0x01cf, 0x0040,
+                       0x0040, 0x01c6, 0x01fa,
+               },
        };
+       static const struct csm_coeffs rec2020_coeffs = {
+               .limited = {
+                       0x001d, 0x004c, 0x0007,
+                       0x01f0, 0x01d8, 0x0038,
+                       0x0038, 0x01cd, 0x01fb,
+               },
+               .full = {
+                       0x0022, 0x0057, 0x0008,
+                       0x01ee, 0x01d2, 0x0040,
+                       0x0040, 0x01c5, 0x01fb,
+               },
+       };
+       static const struct csm_coeffs smpte240m_coeffs = {
+               .limited = {
+                       0x0018, 0x004f, 0x000a,
+                       0x01f3, 0x01d5, 0x0038,
+                       0x0038, 0x01ce, 0x01fa,
+               },
+               .full = {
+                       0x001b, 0x005a, 0x000b,
+                       0x01f1, 0x01cf, 0x0040,
+                       0x0040, 0x01c7, 0x01f9,
+               },
+       };
+
+       const struct csm_coeffs *coeffs;
+       const u16 *csm;
        unsigned int i;
 
-       if (full_range) {
-               for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++)
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
-                                    full_range_coeff[i]);
+       switch (params->ycbcr_encoding) {
+       case V4L2_YCBCR_ENC_601:
+       default:
+               coeffs = &rec601_coeffs;
+               break;
+       case V4L2_YCBCR_ENC_709:
+               coeffs = &rec709_coeffs;
+               break;
+       case V4L2_YCBCR_ENC_BT2020:
+               coeffs = &rec2020_coeffs;
+               break;
+       case V4L2_YCBCR_ENC_SMPTE240M:
+               coeffs = &smpte240m_coeffs;
+               break;
+       }
 
+       if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) {
+               csm = coeffs->full;
                rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
                                      RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
                                      RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
        } else {
-               for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++)
-                       rkisp1_write(params->rkisp1,
-                                    RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
-                                    limited_range_coeff[i]);
-
+               csm = coeffs->limited;
                rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
                                        RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
                                        RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
        }
+
+       for (i = 0; i < 9; i++)
+               rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
+                            csm[i]);
 }
 
 /* ISP De-noise Pre-Filter(DPF) function */
@@ -1216,11 +1258,11 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
                if (module_ens & RKISP1_CIF_ISP_MODULE_DPCC)
                        rkisp1_param_set_bits(params,
                                              RKISP1_CIF_ISP_DPCC_MODE,
-                                             RKISP1_CIF_ISP_DPCC_ENA);
+                                             RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
                else
                        rkisp1_param_clear_bits(params,
                                                RKISP1_CIF_ISP_DPCC_MODE,
-                                               RKISP1_CIF_ISP_DPCC_ENA);
+                                               RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
        }
 
        /* update bls config */
@@ -1255,22 +1297,6 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
                                                RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
        }
 
-       /* update lsc config */
-       if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC)
-               rkisp1_lsc_config(params,
-                                 &new_params->others.lsc_config);
-
-       if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) {
-               if (module_ens & RKISP1_CIF_ISP_MODULE_LSC)
-                       rkisp1_param_set_bits(params,
-                                             RKISP1_CIF_ISP_LSC_CTRL,
-                                             RKISP1_CIF_ISP_LSC_CTRL_ENA);
-               else
-                       rkisp1_param_clear_bits(params,
-                                               RKISP1_CIF_ISP_LSC_CTRL,
-                                               RKISP1_CIF_ISP_LSC_CTRL_ENA);
-       }
-
        /* update awb gains */
        if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
                params->ops->awb_gain_config(params, &new_params->others.awb_gain_config);
@@ -1387,6 +1413,33 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
        }
 }
 
+static void
+rkisp1_isp_isr_lsc_config(struct rkisp1_params *params,
+                         const struct rkisp1_params_cfg *new_params)
+{
+       unsigned int module_en_update, module_cfg_update, module_ens;
+
+       module_en_update = new_params->module_en_update;
+       module_cfg_update = new_params->module_cfg_update;
+       module_ens = new_params->module_ens;
+
+       /* update lsc config */
+       if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC)
+               rkisp1_lsc_config(params,
+                                 &new_params->others.lsc_config);
+
+       if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) {
+               if (module_ens & RKISP1_CIF_ISP_MODULE_LSC)
+                       rkisp1_param_set_bits(params,
+                                             RKISP1_CIF_ISP_LSC_CTRL,
+                                             RKISP1_CIF_ISP_LSC_CTRL_ENA);
+               else
+                       rkisp1_param_clear_bits(params,
+                                               RKISP1_CIF_ISP_LSC_CTRL,
+                                               RKISP1_CIF_ISP_LSC_CTRL_ENA);
+       }
+}
+
 static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
                                       struct  rkisp1_params_cfg *new_params)
 {
@@ -1448,47 +1501,60 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
        }
 }
 
-static void rkisp1_params_apply_params_cfg(struct rkisp1_params *params,
-                                          unsigned int frame_sequence)
+static bool rkisp1_params_get_buffer(struct rkisp1_params *params,
+                                    struct rkisp1_buffer **buf,
+                                    struct rkisp1_params_cfg **cfg)
 {
-       struct rkisp1_params_cfg *new_params;
-       struct rkisp1_buffer *cur_buf = NULL;
-
        if (list_empty(&params->params))
-               return;
-
-       cur_buf = list_first_entry(&params->params,
-                                  struct rkisp1_buffer, queue);
+               return false;
 
-       new_params = (struct rkisp1_params_cfg *)vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0);
+       *buf = list_first_entry(&params->params, struct rkisp1_buffer, queue);
+       *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
 
-       rkisp1_isp_isr_other_config(params, new_params);
-       rkisp1_isp_isr_meas_config(params, new_params);
-
-       /* update shadow register immediately */
-       rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
+       return true;
+}
 
-       list_del(&cur_buf->queue);
+static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
+                                         struct rkisp1_buffer *buf,
+                                         unsigned int frame_sequence)
+{
+       list_del(&buf->queue);
 
-       cur_buf->vb.sequence = frame_sequence;
-       vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       buf->vb.sequence = frame_sequence;
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
 
 void rkisp1_params_isr(struct rkisp1_device *rkisp1)
 {
-       /*
-        * This isr is called when the ISR finishes processing a frame (RKISP1_CIF_ISP_FRAME).
-        * Configurations performed here will be applied on the next frame.
-        * Since frame_sequence is updated on the vertical sync signal, we should use
-        * frame_sequence + 1 here to indicate to userspace on which frame these parameters
-        * are being applied.
-        */
-       unsigned int frame_sequence = rkisp1->isp.frame_sequence + 1;
        struct rkisp1_params *params = &rkisp1->params;
+       struct rkisp1_params_cfg *new_params;
+       struct rkisp1_buffer *cur_buf;
 
        spin_lock(&params->config_lock);
-       rkisp1_params_apply_params_cfg(params, frame_sequence);
 
+       if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+               goto unlock;
+
+       rkisp1_isp_isr_other_config(params, new_params);
+       rkisp1_isp_isr_lsc_config(params, new_params);
+       rkisp1_isp_isr_meas_config(params, new_params);
+
+       /* update shadow register immediately */
+       rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+                             RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
+
+       /*
+        * This isr is called when the ISR finishes processing a frame
+        * (RKISP1_CIF_ISP_FRAME). Configurations performed here will be
+        * applied on the next frame. Since frame_sequence is updated on the
+        * vertical sync signal, we should use frame_sequence + 1 here to
+        * indicate to userspace on which frame these parameters are being
+        * applied.
+        */
+       rkisp1_params_complete_buffer(params, cur_buf,
+                                     rkisp1->isp.frame_sequence + 1);
+
+unlock:
        spin_unlock(&params->config_lock);
 }
 
@@ -1531,9 +1597,18 @@ static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config =
        14
 };
 
-static void rkisp1_params_config_parameter(struct rkisp1_params *params)
+void rkisp1_params_pre_configure(struct rkisp1_params *params,
+                                enum rkisp1_fmt_raw_pat_type bayer_pat,
+                                enum v4l2_quantization quantization,
+                                enum v4l2_ycbcr_encoding ycbcr_encoding)
 {
        struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
+       struct rkisp1_params_cfg *new_params;
+       struct rkisp1_buffer *cur_buf;
+
+       params->quantization = quantization;
+       params->ycbcr_encoding = ycbcr_encoding;
+       params->raw_type = bayer_pat;
 
        params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config);
        params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config,
@@ -1552,27 +1627,55 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
        rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
                              rkisp1_hst_params_default_config.mode);
 
-       /* set the  range */
-       if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
-               rkisp1_csm_config(params, true);
-       else
-               rkisp1_csm_config(params, false);
+       rkisp1_csm_config(params);
 
        spin_lock_irq(&params->config_lock);
 
        /* apply the first buffer if there is one already */
-       rkisp1_params_apply_params_cfg(params, 0);
 
+       if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+               goto unlock;
+
+       rkisp1_isp_isr_other_config(params, new_params);
+       rkisp1_isp_isr_meas_config(params, new_params);
+
+       /* update shadow register immediately */
+       rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+                             RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
+
+unlock:
        spin_unlock_irq(&params->config_lock);
 }
 
-void rkisp1_params_configure(struct rkisp1_params *params,
-                            enum rkisp1_fmt_raw_pat_type bayer_pat,
-                            enum v4l2_quantization quantization)
+void rkisp1_params_post_configure(struct rkisp1_params *params)
 {
-       params->quantization = quantization;
-       params->raw_type = bayer_pat;
-       rkisp1_params_config_parameter(params);
+       struct rkisp1_params_cfg *new_params;
+       struct rkisp1_buffer *cur_buf;
+
+       spin_lock_irq(&params->config_lock);
+
+       /*
+        * Apply LSC parameters from the first buffer (if any is already
+        * available). This must be done after the ISP gets started in the
+        * ISP8000Nano v18.02 (found in the i.MX8MP) as access to the LSC RAM
+        * is gated by the ISP_CTRL.ISP_ENABLE bit. As this initialization
+        * ordering doesn't affect other ISP versions negatively, do so
+        * unconditionally.
+        */
+
+       if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+               goto unlock;
+
+       rkisp1_isp_isr_lsc_config(params, new_params);
+
+       /* update shadow register immediately */
+       rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+                             RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
+
+       rkisp1_params_complete_buffer(params, cur_buf, 0);
+
+unlock:
+       spin_unlock_irq(&params->config_lock);
 }
 
 /*
@@ -1582,7 +1685,7 @@ void rkisp1_params_configure(struct rkisp1_params *params,
 void rkisp1_params_disable(struct rkisp1_params *params)
 {
        rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
-                               RKISP1_CIF_ISP_DPCC_ENA);
+                               RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
        rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
                                RKISP1_CIF_ISP_LSC_CTRL_ENA);
        rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
index dd3e6c3..421cc73 100644
        (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
 #define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1)      \
        (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
-#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1)      \
+#define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1)      \
        (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
 
 /* LSC: ISP_LSC_TABLE_SEL */
 #define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x)  (((x) >> 11) & 1)
 
 /* DPCC */
-/* ISP_DPCC_MODE */
-#define RKISP1_CIF_ISP_DPCC_ENA                                BIT(0)
-#define RKISP1_CIF_ISP_DPCC_MODE_MAX                   0x07
-#define RKISP1_CIF_ISP_DPCC_OUTPUTMODE_MAX             0x0F
-#define RKISP1_CIF_ISP_DPCC_SETUSE_MAX                 0x0F
-#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RESERVED       0xFFFFE000
-#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RESERVED       0xFFFF0000
-#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RESERVED      0xFFFFC0C0
-#define RKISP1_CIF_ISP_DPCC_PG_FAC_RESERVED            0xFFFFC0C0
-#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RESERVED                0xFFFF0000
-#define RKISP1_CIF_ISP_DPCC_RG_FAC_RESERVED            0xFFFFC0C0
-#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_RESERVED          0xFFFFF000
-#define RKISP1_CIF_ISP_DPCC_RND_OFFS_RESERVED          0xFFFFF000
+#define RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE           BIT(0)
+#define RKISP1_CIF_ISP_DPCC_MODE_GRAYSCALE_MODE                BIT(1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK           GENMASK(3, 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_MASK               GENMASK(3, 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK           0x00001f1f
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK           0x0000ffff
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK          0x00003f3f
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_MASK                        0x00003f3f
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK            0x0000ffff
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_MASK                        0x00003f3f
+#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK              0x00000fff
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK              0x00000fff
 
 /* BLS */
 /* ISP_BLS_CTRL */
 #define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA       (RKISP1_CIF_ISP_LSC_BASE + 0x00000018)
 #define RKISP1_CIF_ISP_LSC_B_TABLE_DATA                (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C)
 #define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA       (RKISP1_CIF_ISP_LSC_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_LSC_XGRAD_01            (RKISP1_CIF_ISP_LSC_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_LSC_XGRAD_23            (RKISP1_CIF_ISP_LSC_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_LSC_XGRAD_45            (RKISP1_CIF_ISP_LSC_BASE + 0x0000002C)
-#define RKISP1_CIF_ISP_LSC_XGRAD_67            (RKISP1_CIF_ISP_LSC_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_LSC_YGRAD_01            (RKISP1_CIF_ISP_LSC_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_LSC_YGRAD_23            (RKISP1_CIF_ISP_LSC_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_LSC_YGRAD_45            (RKISP1_CIF_ISP_LSC_BASE + 0x0000003C)
-#define RKISP1_CIF_ISP_LSC_YGRAD_67            (RKISP1_CIF_ISP_LSC_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_LSC_XSIZE_01            (RKISP1_CIF_ISP_LSC_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_LSC_XSIZE_23            (RKISP1_CIF_ISP_LSC_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_LSC_XSIZE_45            (RKISP1_CIF_ISP_LSC_BASE + 0x0000004C)
-#define RKISP1_CIF_ISP_LSC_XSIZE_67            (RKISP1_CIF_ISP_LSC_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_LSC_YSIZE_01            (RKISP1_CIF_ISP_LSC_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_LSC_YSIZE_23            (RKISP1_CIF_ISP_LSC_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_LSC_YSIZE_45            (RKISP1_CIF_ISP_LSC_BASE + 0x0000005C)
-#define RKISP1_CIF_ISP_LSC_YSIZE_67            (RKISP1_CIF_ISP_LSC_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_LSC_XGRAD(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000024 + (n) * 4)
+#define RKISP1_CIF_ISP_LSC_YGRAD(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000034 + (n) * 4)
+#define RKISP1_CIF_ISP_LSC_XSIZE(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000044 + (n) * 4)
+#define RKISP1_CIF_ISP_LSC_YSIZE(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000054 + (n) * 4)
 #define RKISP1_CIF_ISP_LSC_TABLE_SEL           (RKISP1_CIF_ISP_LSC_BASE + 0x00000064)
 #define RKISP1_CIF_ISP_LSC_STATUS              (RKISP1_CIF_ISP_LSC_BASE + 0x00000068)
 
index f4caa8f..f76afd8 100644
@@ -411,6 +411,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
        sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
        sink_fmt->field = V4L2_FIELD_NONE;
        sink_fmt->code = RKISP1_DEF_FMT;
+       sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+       sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+       sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
 
        sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
                                             RKISP1_RSZ_PAD_SINK);
@@ -503,6 +507,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
        struct v4l2_rect *sink_crop;
+       bool is_yuv;
 
        sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
                                          which);
@@ -524,9 +529,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
        if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
                rsz->pixel_enc = mbus_info->pixel_enc;
 
-       /* Propagete to source pad */
-       src_fmt->code = sink_fmt->code;
-
        sink_fmt->width = clamp_t(u32, format->width,
                                  RKISP1_ISP_MIN_WIDTH,
                                  RKISP1_ISP_MAX_WIDTH);
@@ -534,8 +536,45 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
                                   RKISP1_ISP_MIN_HEIGHT,
                                   RKISP1_ISP_MAX_HEIGHT);
 
+       /*
+        * Adjust the color space fields. Accept any color primaries and
+        * transfer function for both YUV and Bayer. For YUV any YCbCr encoding
+        * and quantization range is also accepted. For Bayer formats, the YCbCr
+        * encoding isn't applicable, and the quantization range can only be
+        * full.
+        */
+       is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV;
+
+       sink_fmt->colorspace = format->colorspace ? :
+                              (is_yuv ? V4L2_COLORSPACE_SRGB :
+                               V4L2_COLORSPACE_RAW);
+       sink_fmt->xfer_func = format->xfer_func ? :
+                             V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace);
+       if (is_yuv) {
+               sink_fmt->ycbcr_enc = format->ycbcr_enc ? :
+                       V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace);
+               sink_fmt->quantization = format->quantization ? :
+                       V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace,
+                                                     sink_fmt->ycbcr_enc);
+       } else {
+               /*
+                * The YCbCr encoding isn't applicable for non-YUV formats, but
+                * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it
+                * should be ignored by userspace.
+                */
+               sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+               sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       }
+
        *format = *sink_fmt;
 
+       /* Propagate the media bus code and color space to the source pad. */
+       src_fmt->code = sink_fmt->code;
+       src_fmt->colorspace = sink_fmt->colorspace;
+       src_fmt->xfer_func = sink_fmt->xfer_func;
+       src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc;
+       src_fmt->quantization = sink_fmt->quantization;
+
        /* Update sink crop */
        rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
 }
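
The "?:" defaulting above keeps any non-zero colorimetry value supplied by the caller and otherwise falls back to the V4L2_MAP_*_DEFAULT() helpers, with Bayer formats pinned to a placeholder Rec. 601 encoding and full-range quantization. The same logic as a stand-alone sketch with explicit tests (fill_colorimetry() is a hypothetical helper; the driver keeps this inline in rkisp1_rsz_set_sink_fmt()):

/*
 * Sketch of the colorimetry defaulting used above. Assumes the usual
 * V4L2 headers (<linux/videodev2.h>, <linux/v4l2-mediabus.h>).
 */
static void fill_colorimetry(struct v4l2_mbus_framefmt *fmt, bool is_yuv)
{
        if (!fmt->colorspace)
                fmt->colorspace = is_yuv ? V4L2_COLORSPACE_SRGB
                                         : V4L2_COLORSPACE_RAW;
        if (!fmt->xfer_func)
                fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);

        if (is_yuv) {
                if (!fmt->ycbcr_enc)
                        fmt->ycbcr_enc =
                                V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
                if (!fmt->quantization)
                        fmt->quantization =
                                V4L2_MAP_QUANTIZATION_DEFAULT(false,
                                        fmt->colorspace, fmt->ycbcr_enc);
        } else {
                /* No YCbCr encoding applies to Bayer; range is full. */
                fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
                fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
        }
}
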
index 03638c8..e3b95a2 100644
@@ -524,7 +524,7 @@ static int fimc_capture_release(struct file *file)
        mutex_lock(&fimc->lock);
 
        if (close && vc->streaming) {
-               media_pipeline_stop(&vc->ve.vdev.entity);
+               video_device_pipeline_stop(&vc->ve.vdev);
                vc->streaming = false;
        }
 
@@ -1176,7 +1176,6 @@ static int fimc_cap_streamon(struct file *file, void *priv,
 {
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_vid_cap *vc = &fimc->vid_cap;
-       struct media_entity *entity = &vc->ve.vdev.entity;
        struct fimc_source_info *si = NULL;
        struct v4l2_subdev *sd;
        int ret;
@@ -1184,7 +1183,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
        if (fimc_capture_active(fimc))
                return -EBUSY;
 
-       ret = media_pipeline_start(entity, &vc->ve.pipe->mp);
+       ret = video_device_pipeline_start(&vc->ve.vdev, &vc->ve.pipe->mp);
        if (ret < 0)
                return ret;
 
@@ -1218,7 +1217,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
        }
 
 err_p_stop:
-       media_pipeline_stop(entity);
+       video_device_pipeline_stop(&vc->ve.vdev);
        return ret;
 }
 
@@ -1234,7 +1233,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
                return ret;
 
        if (vc->streaming) {
-               media_pipeline_stop(&vc->ve.vdev.entity);
+               video_device_pipeline_stop(&vc->ve.vdev);
                vc->streaming = false;
        }
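
The exynos4-is conversions all follow one pattern: media_pipeline_start(&vdev->entity, pipe) and media_pipeline_stop(&vdev->entity) become video_device_pipeline_start(vdev, pipe) and video_device_pipeline_stop(vdev), so callers stop reaching into the embedded media_entity. A hedged sketch of a generic streamon/streamoff pair after the conversion (struct my_cap and the hardware comments are placeholders, not code from this tree):

/* Placeholder capture context; assumes <media/v4l2-dev.h>. */
struct my_cap {
        struct video_device vdev;
        struct media_pipeline pipe;
};

static int my_cap_streamon(struct my_cap *cap)
{
        int ret;

        ret = video_device_pipeline_start(&cap->vdev, &cap->pipe);
        if (ret < 0)
                return ret;

        /* ... validate the pipeline and start the hardware here ... */

        return 0;
}

static void my_cap_streamoff(struct my_cap *cap)
{
        /* ... stop the hardware here ... */
        video_device_pipeline_stop(&cap->vdev);
}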
 
index 8f12240..f6a302f 100644
@@ -312,7 +312,7 @@ static int isp_video_release(struct file *file)
        is_singular_file = v4l2_fh_is_singular_file(file);
 
        if (is_singular_file && ivc->streaming) {
-               media_pipeline_stop(entity);
+               video_device_pipeline_stop(&ivc->ve.vdev);
                ivc->streaming = 0;
        }
 
@@ -490,10 +490,9 @@ static int isp_video_streamon(struct file *file, void *priv,
 {
        struct fimc_isp *isp = video_drvdata(file);
        struct exynos_video_entity *ve = &isp->video_capture.ve;
-       struct media_entity *me = &ve->vdev.entity;
        int ret;
 
-       ret = media_pipeline_start(me, &ve->pipe->mp);
+       ret = video_device_pipeline_start(&ve->vdev, &ve->pipe->mp);
        if (ret < 0)
                return ret;
 
@@ -508,7 +507,7 @@ static int isp_video_streamon(struct file *file, void *priv,
        isp->video_capture.streaming = 1;
        return 0;
 p_stop:
-       media_pipeline_stop(me);
+       video_device_pipeline_stop(&ve->vdev);
        return ret;
 }
 
@@ -523,7 +522,7 @@ static int isp_video_streamoff(struct file *file, void *priv,
        if (ret < 0)
                return ret;
 
-       media_pipeline_stop(&video->ve.vdev.entity);
+       video_device_pipeline_stop(&video->ve.vdev);
        video->streaming = 0;
        return 0;
 }
index 41b0a4a..e185a40 100644
@@ -516,7 +516,7 @@ static int fimc_lite_release(struct file *file)
        if (v4l2_fh_is_singular_file(file) &&
            atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
                if (fimc->streaming) {
-                       media_pipeline_stop(entity);
+                       video_device_pipeline_stop(&fimc->ve.vdev);
                        fimc->streaming = false;
                }
                fimc_lite_stop_capture(fimc, false);
@@ -812,13 +812,12 @@ static int fimc_lite_streamon(struct file *file, void *priv,
                              enum v4l2_buf_type type)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       struct media_entity *entity = &fimc->ve.vdev.entity;
        int ret;
 
        if (fimc_lite_active(fimc))
                return -EBUSY;
 
-       ret = media_pipeline_start(entity, &fimc->ve.pipe->mp);
+       ret = video_device_pipeline_start(&fimc->ve.vdev, &fimc->ve.pipe->mp);
        if (ret < 0)
                return ret;
 
@@ -835,7 +834,7 @@ static int fimc_lite_streamon(struct file *file, void *priv,
        }
 
 err_p_stop:
-       media_pipeline_stop(entity);
+       video_device_pipeline_stop(&fimc->ve.vdev);
        return 0;
 }
 
@@ -849,7 +848,7 @@ static int fimc_lite_streamoff(struct file *file, void *priv,
        if (ret < 0)
                return ret;
 
-       media_pipeline_stop(&fimc->ve.vdev.entity);
+       video_device_pipeline_stop(&fimc->ve.vdev);
        fimc->streaming = false;
        return 0;
 }
index c2d8f1e..db106eb 100644
@@ -848,13 +848,13 @@ static int s3c_camif_streamon(struct file *file, void *priv,
        if (s3c_vp_active(vp))
                return 0;
 
-       ret = media_pipeline_start(sensor, camif->m_pipeline);
+       ret = media_pipeline_start(sensor->pads, camif->m_pipeline);
        if (ret < 0)
                return ret;
 
        ret = camif_pipeline_validate(camif);
        if (ret < 0) {
-               media_pipeline_stop(sensor);
+               media_pipeline_stop(sensor->pads);
                return ret;
        }
 
@@ -878,7 +878,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv,
 
        ret = vb2_streamoff(&vp->vb_queue, type);
        if (ret == 0)
-               media_pipeline_stop(&camif->sensor.sd->entity);
+               media_pipeline_stop(camif->sensor.sd->entity.pads);
        return ret;
 }
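
s3c-camif keeps the low-level calls but adapts to their pad-based signatures: media_pipeline_start() and media_pipeline_stop() now take a struct media_pad *, and passing entity->pads, as above, simply points them at the entity's first pad. A minimal sketch of that usage (the helper names are placeholders):

/* Sketch: start/stop a pipeline from an entity's first pad. */
static int start_from_first_pad(struct media_entity *entity,
                                struct media_pipeline *pipe)
{
        return media_pipeline_start(&entity->pads[0], pipe);
}

static void stop_from_first_pad(struct media_entity *entity)
{
        media_pipeline_stop(&entity->pads[0]);
}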
 
index 2ca95ab..37458d4 100644
@@ -751,7 +751,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
                goto err_unlocked;
        }
 
-       ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
+       ret = video_device_pipeline_start(dcmi->vdev, &dcmi->pipeline);
        if (ret < 0) {
                dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
                        __func__, ret);
@@ -865,7 +865,7 @@ err_pipeline_stop:
        dcmi_pipeline_stop(dcmi);
 
 err_media_pipeline_stop:
-       media_pipeline_stop(&dcmi->vdev->entity);
+       video_device_pipeline_stop(dcmi->vdev);
 
 err_pm_put:
        pm_runtime_put(dcmi->dev);
@@ -892,7 +892,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 
        dcmi_pipeline_stop(dcmi);
 
-       media_pipeline_stop(&dcmi->vdev->entity);
+       video_device_pipeline_stop(dcmi->vdev);
 
        spin_lock_irq(&dcmi->irqlock);
 
index 7960e68..60610c0 100644
@@ -3,7 +3,7 @@
 config VIDEO_SUN4I_CSI
        tristate "Allwinner A10 CMOS Sensor Interface Support"
        depends on V4L_PLATFORM_DRIVERS
-       depends on VIDEO_DEV && COMMON_CLK  && HAS_DMA
+       depends on VIDEO_DEV && COMMON_CLK && RESET_CONTROLLER && HAS_DMA
        depends on ARCH_SUNXI || COMPILE_TEST
        select MEDIA_CONTROLLER
        select VIDEO_V4L2_SUBDEV_API
index 0912a1b..a3e826a 100644
@@ -266,7 +266,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
                goto err_clear_dma_queue;
        }
 
-       ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
+       ret = video_device_pipeline_alloc_start(&csi->vdev);
        if (ret < 0)
                goto err_free_scratch_buffer;
 
@@ -330,7 +330,7 @@ err_disable_device:
        sun4i_csi_capture_stop(csi);
 
 err_disable_pipeline:
-       media_pipeline_stop(&csi->vdev.entity);
+       video_device_pipeline_stop(&csi->vdev);
 
 err_free_scratch_buffer:
        dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
@@ -359,7 +359,7 @@ static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
        return_all_buffers(csi, VB2_BUF_STATE_ERROR);
        spin_unlock_irqrestore(&csi->qlock, flags);
 
-       media_pipeline_stop(&csi->vdev.entity);
+       video_device_pipeline_stop(&csi->vdev);
 
        dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
                          csi->scratch.paddr);
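
sun4i-csi switches to video_device_pipeline_alloc_start(), which differs from video_device_pipeline_start() in that the media core allocates and tracks the pipeline itself instead of the driver embedding one; video_device_pipeline_stop() later releases it. A short sketch under that assumption (hardware steps only hinted at):

/* Sketch: let the core own the pipeline when the driver has none. */
static int start_capture(struct video_device *vdev)
{
        int ret = video_device_pipeline_alloc_start(vdev);

        if (ret < 0)
                return ret;

        /* ... program DMA and enable the interface here ... */

        return 0;
}

static void stop_capture(struct video_device *vdev)
{
        /* ... disable the interface and return buffers here ... */
        video_device_pipeline_stop(vdev);
}
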
index 0345901..886006f 100644
@@ -1,13 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config VIDEO_SUN6I_CSI
-       tristate "Allwinner V3s Camera Sensor Interface driver"
-       depends on V4L_PLATFORM_DRIVERS
-       depends on VIDEO_DEV && COMMON_CLK  && HAS_DMA
+       tristate "Allwinner A31 Camera Sensor Interface (CSI) Driver"
+       depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
        depends on ARCH_SUNXI || COMPILE_TEST
+       depends on PM && COMMON_CLK && RESET_CONTROLLER && HAS_DMA
        select MEDIA_CONTROLLER
        select VIDEO_V4L2_SUBDEV_API
        select VIDEOBUF2_DMA_CONTIG
-       select REGMAP_MMIO
        select V4L2_FWNODE
+       select REGMAP_MMIO
        help
-          Support for the Allwinner Camera Sensor Interface Controller on V3s.
+          Support for the Allwinner A31 Camera Sensor Interface (CSI)
+          controller, also found on other platforms such as the A83T, H3,
+          V3/V3s or A64.
index a971587..8b99c17 100644
 #include <linux/sched.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <media/v4l2-mc.h>
 
 #include "sun6i_csi.h"
 #include "sun6i_csi_reg.h"
 
-#define MODULE_NAME    "sun6i-csi"
-
-struct sun6i_csi_dev {
-       struct sun6i_csi                csi;
-       struct device                   *dev;
-
-       struct regmap                   *regmap;
-       struct clk                      *clk_mod;
-       struct clk                      *clk_ram;
-       struct reset_control            *rstc_bus;
-
-       int                             planar_offset[3];
-};
-
-static inline struct sun6i_csi_dev *sun6i_csi_to_dev(struct sun6i_csi *csi)
-{
-       return container_of(csi, struct sun6i_csi_dev, csi);
-}
+/* Helpers */
 
 /* TODO add 10&12 bit YUV, RGB support */
-bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
+bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev,
                                   u32 pixformat, u32 mbus_code)
 {
-       struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+       struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2;
 
        /*
         * Some video receivers have the ability to be compatible with
         * 8bit and 16bit bus width.
         * Identify the media bus format from device tree.
         */
-       if ((sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_PARALLEL
-            || sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656)
-            && sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) {
+       if ((v4l2->v4l2_ep.bus_type == V4L2_MBUS_PARALLEL
+            || v4l2->v4l2_ep.bus_type == V4L2_MBUS_BT656)
+            && v4l2->v4l2_ep.bus.parallel.bus_width == 16) {
                switch (pixformat) {
                case V4L2_PIX_FMT_NV12_16L16:
                case V4L2_PIX_FMT_NV12:
@@ -76,13 +60,14 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
                        case MEDIA_BUS_FMT_YVYU8_1X16:
                                return true;
                        default:
-                               dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n",
+                               dev_dbg(csi_dev->dev,
+                                       "Unsupported mbus code: 0x%x\n",
                                        mbus_code);
                                break;
                        }
                        break;
                default:
-                       dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n",
+                       dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n",
                                pixformat);
                        break;
                }
@@ -139,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
                case MEDIA_BUS_FMT_YVYU8_2X8:
                        return true;
                default:
-                       dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n",
+                       dev_dbg(csi_dev->dev, "Unsupported mbus code: 0x%x\n",
                                mbus_code);
                        break;
                }
@@ -154,67 +139,37 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
                return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8);
 
        default:
-               dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
+               dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n",
+                       pixformat);
                break;
        }
 
        return false;
 }
 
-int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
+int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable)
 {
-       struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
-       struct device *dev = sdev->dev;
-       struct regmap *regmap = sdev->regmap;
+       struct device *dev = csi_dev->dev;
+       struct regmap *regmap = csi_dev->regmap;
        int ret;
 
        if (!enable) {
                regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0);
+               pm_runtime_put(dev);
 
-               clk_disable_unprepare(sdev->clk_ram);
-               if (of_device_is_compatible(dev->of_node,
-                                           "allwinner,sun50i-a64-csi"))
-                       clk_rate_exclusive_put(sdev->clk_mod);
-               clk_disable_unprepare(sdev->clk_mod);
-               reset_control_assert(sdev->rstc_bus);
                return 0;
        }
 
-       ret = clk_prepare_enable(sdev->clk_mod);
-       if (ret) {
-               dev_err(sdev->dev, "Enable csi clk err %d\n", ret);
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
                return ret;
-       }
-
-       if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
-               clk_set_rate_exclusive(sdev->clk_mod, 300000000);
-
-       ret = clk_prepare_enable(sdev->clk_ram);
-       if (ret) {
-               dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret);
-               goto clk_mod_disable;
-       }
-
-       ret = reset_control_deassert(sdev->rstc_bus);
-       if (ret) {
-               dev_err(sdev->dev, "reset err %d\n", ret);
-               goto clk_ram_disable;
-       }
 
        regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, CSI_EN_CSI_EN);
 
        return 0;
-
-clk_ram_disable:
-       clk_disable_unprepare(sdev->clk_ram);
-clk_mod_disable:
-       if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
-               clk_rate_exclusive_put(sdev->clk_mod);
-       clk_disable_unprepare(sdev->clk_mod);
-       return ret;
 }
 
-static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev,
+static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_device *csi_dev,
                                               u32 mbus_code, u32 pixformat)
 {
        /* non-YUV */
@@ -232,12 +187,13 @@ static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev,
        }
 
        /* not support YUV420 input format yet */
-       dev_dbg(sdev->dev, "Select YUV422 as default input format of CSI.\n");
+       dev_dbg(csi_dev->dev, "Select YUV422 as default input format of CSI.\n");
        return CSI_INPUT_FORMAT_YUV422;
 }
 
-static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
-                                                u32 pixformat, u32 field)
+static enum csi_output_fmt
+get_csi_output_format(struct sun6i_csi_device *csi_dev, u32 pixformat,
+                     u32 field)
 {
        bool buf_interlaced = false;
 
@@ -296,14 +252,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
                return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
 
        default:
-               dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
+               dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
                break;
        }
 
        return CSI_FIELD_RAW_8;
 }
 
-static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
+static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_device *csi_dev,
                                            u32 mbus_code, u32 pixformat)
 {
        /* Input sequence does not apply to non-YUV formats */
@@ -330,7 +286,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
                case MEDIA_BUS_FMT_YVYU8_2X8:
                        return CSI_INPUT_SEQ_YVYU;
                default:
-                       dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n",
+                       dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n",
                                 mbus_code);
                        break;
                }
@@ -352,7 +308,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
                case MEDIA_BUS_FMT_YVYU8_2X8:
                        return CSI_INPUT_SEQ_YUYV;
                default:
-                       dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n",
+                       dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n",
                                 mbus_code);
                        break;
                }
@@ -362,7 +318,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
                return CSI_INPUT_SEQ_YUYV;
 
        default:
-               dev_warn(sdev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n",
+               dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n",
                         pixformat);
                break;
        }
@@ -370,23 +326,23 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
        return CSI_INPUT_SEQ_YUYV;
 }
 
-static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev)
+static void sun6i_csi_setup_bus(struct sun6i_csi_device *csi_dev)
 {
-       struct v4l2_fwnode_endpoint *endpoint = &sdev->csi.v4l2_ep;
-       struct sun6i_csi *csi = &sdev->csi;
+       struct v4l2_fwnode_endpoint *endpoint = &csi_dev->v4l2.v4l2_ep;
+       struct sun6i_csi_config *config = &csi_dev->config;
        unsigned char bus_width;
        u32 flags;
        u32 cfg;
        bool input_interlaced = false;
 
-       if (csi->config.field == V4L2_FIELD_INTERLACED
-           || csi->config.field == V4L2_FIELD_INTERLACED_TB
-           || csi->config.field == V4L2_FIELD_INTERLACED_BT)
+       if (config->field == V4L2_FIELD_INTERLACED
+           || config->field == V4L2_FIELD_INTERLACED_TB
+           || config->field == V4L2_FIELD_INTERLACED_BT)
                input_interlaced = true;
 
        bus_width = endpoint->bus.parallel.bus_width;
 
-       regmap_read(sdev->regmap, CSI_IF_CFG_REG, &cfg);
+       regmap_read(csi_dev->regmap, CSI_IF_CFG_REG, &cfg);
 
        cfg &= ~(CSI_IF_CFG_CSI_IF_MASK | CSI_IF_CFG_MIPI_IF_MASK |
                 CSI_IF_CFG_IF_DATA_WIDTH_MASK |
@@ -434,7 +390,7 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev)
                        cfg |= CSI_IF_CFG_CLK_POL_FALLING_EDGE;
                break;
        default:
-               dev_warn(sdev->dev, "Unsupported bus type: %d\n",
+               dev_warn(csi_dev->dev, "Unsupported bus type: %d\n",
                         endpoint->bus_type);
                break;
        }
@@ -452,54 +408,54 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev)
        case 16: /* No need to configure DATA_WIDTH for 16bit */
                break;
        default:
-               dev_warn(sdev->dev, "Unsupported bus width: %u\n", bus_width);
+               dev_warn(csi_dev->dev, "Unsupported bus width: %u\n", bus_width);
                break;
        }
 
-       regmap_write(sdev->regmap, CSI_IF_CFG_REG, cfg);
+       regmap_write(csi_dev->regmap, CSI_IF_CFG_REG, cfg);
 }
 
-static void sun6i_csi_set_format(struct sun6i_csi_dev *sdev)
+static void sun6i_csi_set_format(struct sun6i_csi_device *csi_dev)
 {
-       struct sun6i_csi *csi = &sdev->csi;
+       struct sun6i_csi_config *config = &csi_dev->config;
        u32 cfg;
        u32 val;
 
-       regmap_read(sdev->regmap, CSI_CH_CFG_REG, &cfg);
+       regmap_read(csi_dev->regmap, CSI_CH_CFG_REG, &cfg);
 
        cfg &= ~(CSI_CH_CFG_INPUT_FMT_MASK |
                 CSI_CH_CFG_OUTPUT_FMT_MASK | CSI_CH_CFG_VFLIP_EN |
                 CSI_CH_CFG_HFLIP_EN | CSI_CH_CFG_FIELD_SEL_MASK |
                 CSI_CH_CFG_INPUT_SEQ_MASK);
 
-       val = get_csi_input_format(sdev, csi->config.code,
-                                  csi->config.pixelformat);
+       val = get_csi_input_format(csi_dev, config->code,
+                                  config->pixelformat);
        cfg |= CSI_CH_CFG_INPUT_FMT(val);
 
-       val = get_csi_output_format(sdev, csi->config.pixelformat,
-                                   csi->config.field);
+       val = get_csi_output_format(csi_dev, config->pixelformat,
+                                   config->field);
        cfg |= CSI_CH_CFG_OUTPUT_FMT(val);
 
-       val = get_csi_input_seq(sdev, csi->config.code,
-                               csi->config.pixelformat);
+       val = get_csi_input_seq(csi_dev, config->code,
+                               config->pixelformat);
        cfg |= CSI_CH_CFG_INPUT_SEQ(val);
 
-       if (csi->config.field == V4L2_FIELD_TOP)
+       if (config->field == V4L2_FIELD_TOP)
                cfg |= CSI_CH_CFG_FIELD_SEL_FIELD0;
-       else if (csi->config.field == V4L2_FIELD_BOTTOM)
+       else if (config->field == V4L2_FIELD_BOTTOM)
                cfg |= CSI_CH_CFG_FIELD_SEL_FIELD1;
        else
                cfg |= CSI_CH_CFG_FIELD_SEL_BOTH;
 
-       regmap_write(sdev->regmap, CSI_CH_CFG_REG, cfg);
+       regmap_write(csi_dev->regmap, CSI_CH_CFG_REG, cfg);
 }
 
-static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
+static void sun6i_csi_set_window(struct sun6i_csi_device *csi_dev)
 {
-       struct sun6i_csi_config *config = &sdev->csi.config;
+       struct sun6i_csi_config *config = &csi_dev->config;
        u32 bytesperline_y;
        u32 bytesperline_c;
-       int *planar_offset = sdev->planar_offset;
+       int *planar_offset = csi_dev->planar_offset;
        u32 width = config->width;
        u32 height = config->height;
        u32 hor_len = width;
@@ -509,7 +465,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
        case V4L2_PIX_FMT_YVYU:
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_VYUY:
-               dev_dbg(sdev->dev,
+               dev_dbg(csi_dev->dev,
                        "Horizontal length should be 2 times of width for packed YUV formats!\n");
                hor_len = width * 2;
                break;
@@ -517,10 +473,10 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
                break;
        }
 
-       regmap_write(sdev->regmap, CSI_CH_HSIZE_REG,
+       regmap_write(csi_dev->regmap, CSI_CH_HSIZE_REG,
                     CSI_CH_HSIZE_HOR_LEN(hor_len) |
                     CSI_CH_HSIZE_HOR_START(0));
-       regmap_write(sdev->regmap, CSI_CH_VSIZE_REG,
+       regmap_write(csi_dev->regmap, CSI_CH_VSIZE_REG,
                     CSI_CH_VSIZE_VER_LEN(height) |
                     CSI_CH_VSIZE_VER_START(0));
 
@@ -552,7 +508,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
                                bytesperline_c * height;
                break;
        default: /* raw */
-               dev_dbg(sdev->dev,
+               dev_dbg(csi_dev->dev,
                        "Calculating pixelformat(0x%x)'s bytesperline as a packed format\n",
                        config->pixelformat);
                bytesperline_y = (sun6i_csi_get_bpp(config->pixelformat) *
@@ -563,46 +519,42 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
                break;
        }
 
-       regmap_write(sdev->regmap, CSI_CH_BUF_LEN_REG,
+       regmap_write(csi_dev->regmap, CSI_CH_BUF_LEN_REG,
                     CSI_CH_BUF_LEN_BUF_LEN_C(bytesperline_c) |
                     CSI_CH_BUF_LEN_BUF_LEN_Y(bytesperline_y));
 }
 
-int sun6i_csi_update_config(struct sun6i_csi *csi,
+int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev,
                            struct sun6i_csi_config *config)
 {
-       struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
-
        if (!config)
                return -EINVAL;
 
-       memcpy(&csi->config, config, sizeof(csi->config));
+       memcpy(&csi_dev->config, config, sizeof(csi_dev->config));
 
-       sun6i_csi_setup_bus(sdev);
-       sun6i_csi_set_format(sdev);
-       sun6i_csi_set_window(sdev);
+       sun6i_csi_setup_bus(csi_dev);
+       sun6i_csi_set_format(csi_dev);
+       sun6i_csi_set_window(csi_dev);
 
        return 0;
 }
 
-void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr)
+void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev,
+                              dma_addr_t addr)
 {
-       struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
-
-       regmap_write(sdev->regmap, CSI_CH_F0_BUFA_REG,
-                    (addr + sdev->planar_offset[0]) >> 2);
-       if (sdev->planar_offset[1] != -1)
-               regmap_write(sdev->regmap, CSI_CH_F1_BUFA_REG,
-                            (addr + sdev->planar_offset[1]) >> 2);
-       if (sdev->planar_offset[2] != -1)
-               regmap_write(sdev->regmap, CSI_CH_F2_BUFA_REG,
-                            (addr + sdev->planar_offset[2]) >> 2);
+       regmap_write(csi_dev->regmap, CSI_CH_F0_BUFA_REG,
+                    (addr + csi_dev->planar_offset[0]) >> 2);
+       if (csi_dev->planar_offset[1] != -1)
+               regmap_write(csi_dev->regmap, CSI_CH_F1_BUFA_REG,
+                            (addr + csi_dev->planar_offset[1]) >> 2);
+       if (csi_dev->planar_offset[2] != -1)
+               regmap_write(csi_dev->regmap, CSI_CH_F2_BUFA_REG,
+                            (addr + csi_dev->planar_offset[2]) >> 2);
 }
 
-void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable)
+void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable)
 {
-       struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
-       struct regmap *regmap = sdev->regmap;
+       struct regmap *regmap = csi_dev->regmap;
 
        if (!enable) {
                regmap_update_bits(regmap, CSI_CAP_REG, CSI_CAP_CH0_VCAP_ON, 0);
@@ -623,10 +575,15 @@ void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable)
                           CSI_CAP_CH0_VCAP_ON);
 }
 
-/* -----------------------------------------------------------------------------
- * Media Controller and V4L2
- */
-static int sun6i_csi_link_entity(struct sun6i_csi *csi,
+/* Media */
+
+static const struct media_device_ops sun6i_csi_media_ops = {
+       .link_notify = v4l2_pipeline_link_notify,
+};
+
+/* V4L2 */
+
+static int sun6i_csi_link_entity(struct sun6i_csi_device *csi_dev,
                                 struct media_entity *entity,
                                 struct fwnode_handle *fwnode)
 {
@@ -637,24 +594,25 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi,
 
        ret = media_entity_get_fwnode_pad(entity, fwnode, MEDIA_PAD_FL_SOURCE);
        if (ret < 0) {
-               dev_err(csi->dev, "%s: no source pad in external entity %s\n",
-                       __func__, entity->name);
+               dev_err(csi_dev->dev,
+                       "%s: no source pad in external entity %s\n", __func__,
+                       entity->name);
                return -EINVAL;
        }
 
        src_pad_index = ret;
 
-       sink = &csi->video.vdev.entity;
-       sink_pad = &csi->video.pad;
+       sink = &csi_dev->video.video_dev.entity;
+       sink_pad = &csi_dev->video.pad;
 
-       dev_dbg(csi->dev, "creating %s:%u -> %s:%u link\n",
+       dev_dbg(csi_dev->dev, "creating %s:%u -> %s:%u link\n",
                entity->name, src_pad_index, sink->name, sink_pad->index);
        ret = media_create_pad_link(entity, src_pad_index, sink,
                                    sink_pad->index,
                                    MEDIA_LNK_FL_ENABLED |
                                    MEDIA_LNK_FL_IMMUTABLE);
        if (ret < 0) {
-               dev_err(csi->dev, "failed to create %s:%u -> %s:%u link\n",
+               dev_err(csi_dev->dev, "failed to create %s:%u -> %s:%u link\n",
                        entity->name, src_pad_index,
                        sink->name, sink_pad->index);
                return ret;
@@ -665,27 +623,29 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi,
 
 static int sun6i_subdev_notify_complete(struct v4l2_async_notifier *notifier)
 {
-       struct sun6i_csi *csi = container_of(notifier, struct sun6i_csi,
-                                            notifier);
-       struct v4l2_device *v4l2_dev = &csi->v4l2_dev;
+       struct sun6i_csi_device *csi_dev =
+               container_of(notifier, struct sun6i_csi_device,
+                            v4l2.notifier);
+       struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2;
+       struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev;
        struct v4l2_subdev *sd;
        int ret;
 
-       dev_dbg(csi->dev, "notify complete, all subdevs registered\n");
+       dev_dbg(csi_dev->dev, "notify complete, all subdevs registered\n");
 
        sd = list_first_entry(&v4l2_dev->subdevs, struct v4l2_subdev, list);
        if (!sd)
                return -EINVAL;
 
-       ret = sun6i_csi_link_entity(csi, &sd->entity, sd->fwnode);
+       ret = sun6i_csi_link_entity(csi_dev, &sd->entity, sd->fwnode);
        if (ret < 0)
                return ret;
 
-       ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+       ret = v4l2_device_register_subdev_nodes(v4l2_dev);
        if (ret < 0)
                return ret;
 
-       return media_device_register(&csi->media_dev);
+       return 0;
 }
 
 static const struct v4l2_async_notifier_operations sun6i_csi_async_ops = {
@@ -696,7 +656,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
                                  struct v4l2_fwnode_endpoint *vep,
                                  struct v4l2_async_subdev *asd)
 {
-       struct sun6i_csi *csi = dev_get_drvdata(dev);
+       struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev);
 
        if (vep->base.port || vep->base.id) {
                dev_warn(dev, "Only support a single port with one endpoint\n");
@@ -706,7 +666,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
        switch (vep->bus_type) {
        case V4L2_MBUS_PARALLEL:
        case V4L2_MBUS_BT656:
-               csi->v4l2_ep = *vep;
+               csi_dev->v4l2.v4l2_ep = *vep;
                return 0;
        default:
                dev_err(dev, "Unsupported media bus type\n");
@@ -714,87 +674,102 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
        }
 }
 
-static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi)
-{
-       media_device_unregister(&csi->media_dev);
-       v4l2_async_nf_unregister(&csi->notifier);
-       v4l2_async_nf_cleanup(&csi->notifier);
-       sun6i_video_cleanup(&csi->video);
-       v4l2_device_unregister(&csi->v4l2_dev);
-       v4l2_ctrl_handler_free(&csi->ctrl_handler);
-       media_device_cleanup(&csi->media_dev);
-}
-
-static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
+static int sun6i_csi_v4l2_setup(struct sun6i_csi_device *csi_dev)
 {
+       struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2;
+       struct media_device *media_dev = &v4l2->media_dev;
+       struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev;
+       struct v4l2_async_notifier *notifier = &v4l2->notifier;
+       struct device *dev = csi_dev->dev;
        int ret;
 
-       csi->media_dev.dev = csi->dev;
-       strscpy(csi->media_dev.model, "Allwinner Video Capture Device",
-               sizeof(csi->media_dev.model));
-       csi->media_dev.hw_revision = 0;
+       /* Media Device */
+
+       strscpy(media_dev->model, SUN6I_CSI_DESCRIPTION,
+               sizeof(media_dev->model));
+       media_dev->hw_revision = 0;
+       media_dev->ops = &sun6i_csi_media_ops;
+       media_dev->dev = dev;
 
-       media_device_init(&csi->media_dev);
-       v4l2_async_nf_init(&csi->notifier);
+       media_device_init(media_dev);
 
-       ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0);
+       ret = media_device_register(media_dev);
        if (ret) {
-               dev_err(csi->dev, "V4L2 controls handler init failed (%d)\n",
-                       ret);
-               goto clean_media;
+               dev_err(dev, "failed to register media device: %d\n", ret);
+               goto error_media;
        }
 
-       csi->v4l2_dev.mdev = &csi->media_dev;
-       csi->v4l2_dev.ctrl_handler = &csi->ctrl_handler;
-       ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+       /* V4L2 Device */
+
+       v4l2_dev->mdev = media_dev;
+
+       ret = v4l2_device_register(dev, v4l2_dev);
        if (ret) {
-               dev_err(csi->dev, "V4L2 device registration failed (%d)\n",
-                       ret);
-               goto free_ctrl;
+               dev_err(dev, "failed to register v4l2 device: %d\n", ret);
+               goto error_media;
        }
 
-       ret = sun6i_video_init(&csi->video, csi, "sun6i-csi");
+       /* Video */
+
+       ret = sun6i_video_setup(csi_dev);
        if (ret)
-               goto unreg_v4l2;
+               goto error_v4l2_device;
 
-       ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev,
-                                                  &csi->notifier,
+       /* V4L2 Async */
+
+       v4l2_async_nf_init(notifier);
+       notifier->ops = &sun6i_csi_async_ops;
+
+       ret = v4l2_async_nf_parse_fwnode_endpoints(dev, notifier,
                                                   sizeof(struct
                                                          v4l2_async_subdev),
                                                   sun6i_csi_fwnode_parse);
        if (ret)
-               goto clean_video;
+               goto error_video;
 
-       csi->notifier.ops = &sun6i_csi_async_ops;
-
-       ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
+       ret = v4l2_async_nf_register(v4l2_dev, notifier);
        if (ret) {
-               dev_err(csi->dev, "notifier registration failed\n");
-               goto clean_video;
+               dev_err(dev, "failed to register v4l2 async notifier: %d\n",
+                       ret);
+               goto error_v4l2_async_notifier;
        }
 
        return 0;
 
-clean_video:
-       sun6i_video_cleanup(&csi->video);
-unreg_v4l2:
-       v4l2_device_unregister(&csi->v4l2_dev);
-free_ctrl:
-       v4l2_ctrl_handler_free(&csi->ctrl_handler);
-clean_media:
-       v4l2_async_nf_cleanup(&csi->notifier);
-       media_device_cleanup(&csi->media_dev);
+error_v4l2_async_notifier:
+       v4l2_async_nf_cleanup(notifier);
+
+error_video:
+       sun6i_video_cleanup(csi_dev);
+
+error_v4l2_device:
+       v4l2_device_unregister(&v4l2->v4l2_dev);
+
+error_media:
+       media_device_unregister(media_dev);
+       media_device_cleanup(media_dev);
 
        return ret;
 }
 
-/* -----------------------------------------------------------------------------
- * Resources and IRQ
- */
-static irqreturn_t sun6i_csi_isr(int irq, void *dev_id)
+static void sun6i_csi_v4l2_cleanup(struct sun6i_csi_device *csi_dev)
 {
-       struct sun6i_csi_dev *sdev = (struct sun6i_csi_dev *)dev_id;
-       struct regmap *regmap = sdev->regmap;
+       struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2;
+
+       media_device_unregister(&v4l2->media_dev);
+       v4l2_async_nf_unregister(&v4l2->notifier);
+       v4l2_async_nf_cleanup(&v4l2->notifier);
+       sun6i_video_cleanup(csi_dev);
+       v4l2_device_unregister(&v4l2->v4l2_dev);
+       media_device_cleanup(&v4l2->media_dev);
+}
+
+/* Platform */
+
+static irqreturn_t sun6i_csi_interrupt(int irq, void *private)
+{
+       struct sun6i_csi_device *csi_dev = private;
+       struct regmap *regmap = csi_dev->regmap;
        u32 status;
 
        regmap_read(regmap, CSI_CH_INT_STA_REG, &status);
@@ -814,13 +789,63 @@ static irqreturn_t sun6i_csi_isr(int irq, void *dev_id)
        }
 
        if (status & CSI_CH_INT_STA_FD_PD)
-               sun6i_video_frame_done(&sdev->csi.video);
+               sun6i_video_frame_done(csi_dev);
 
        regmap_write(regmap, CSI_CH_INT_STA_REG, status);
 
        return IRQ_HANDLED;
 }
 
+static int sun6i_csi_suspend(struct device *dev)
+{
+       struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev);
+
+       reset_control_assert(csi_dev->reset);
+       clk_disable_unprepare(csi_dev->clock_ram);
+       clk_disable_unprepare(csi_dev->clock_mod);
+
+       return 0;
+}
+
+static int sun6i_csi_resume(struct device *dev)
+{
+       struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = reset_control_deassert(csi_dev->reset);
+       if (ret) {
+               dev_err(dev, "failed to deassert reset\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(csi_dev->clock_mod);
+       if (ret) {
+               dev_err(dev, "failed to enable module clock\n");
+               goto error_reset;
+       }
+
+       ret = clk_prepare_enable(csi_dev->clock_ram);
+       if (ret) {
+               dev_err(dev, "failed to enable ram clock\n");
+               goto error_clock_mod;
+       }
+
+       return 0;
+
+error_clock_mod:
+       clk_disable_unprepare(csi_dev->clock_mod);
+
+error_reset:
+       reset_control_assert(csi_dev->reset);
+
+       return ret;
+}
+
+static const struct dev_pm_ops sun6i_csi_pm_ops = {
+       .runtime_suspend        = sun6i_csi_suspend,
+       .runtime_resume         = sun6i_csi_resume,
+};
+
 static const struct regmap_config sun6i_csi_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
@@ -828,106 +853,181 @@ static const struct regmap_config sun6i_csi_regmap_config = {
        .max_register   = 0x9c,
 };
 
-static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
-                                     struct platform_device *pdev)
+static int sun6i_csi_resources_setup(struct sun6i_csi_device *csi_dev,
+                                    struct platform_device *platform_dev)
 {
+       struct device *dev = csi_dev->dev;
+       const struct sun6i_csi_variant *variant;
        void __iomem *io_base;
        int ret;
        int irq;
 
-       io_base = devm_platform_ioremap_resource(pdev, 0);
+       variant = of_device_get_match_data(dev);
+       if (!variant)
+               return -EINVAL;
+
+       /* Registers */
+
+       io_base = devm_platform_ioremap_resource(platform_dev, 0);
        if (IS_ERR(io_base))
                return PTR_ERR(io_base);
 
-       sdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "bus", io_base,
-                                                &sun6i_csi_regmap_config);
-       if (IS_ERR(sdev->regmap)) {
-               dev_err(&pdev->dev, "Failed to init register map\n");
-               return PTR_ERR(sdev->regmap);
+       csi_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base,
+                                                   &sun6i_csi_regmap_config);
+       if (IS_ERR(csi_dev->regmap)) {
+               dev_err(dev, "failed to init register map\n");
+               return PTR_ERR(csi_dev->regmap);
        }
 
-       sdev->clk_mod = devm_clk_get(&pdev->dev, "mod");
-       if (IS_ERR(sdev->clk_mod)) {
-               dev_err(&pdev->dev, "Unable to acquire csi clock\n");
-               return PTR_ERR(sdev->clk_mod);
+       /* Clocks */
+
+       csi_dev->clock_mod = devm_clk_get(dev, "mod");
+       if (IS_ERR(csi_dev->clock_mod)) {
+               dev_err(dev, "failed to acquire module clock\n");
+               return PTR_ERR(csi_dev->clock_mod);
        }
 
-       sdev->clk_ram = devm_clk_get(&pdev->dev, "ram");
-       if (IS_ERR(sdev->clk_ram)) {
-               dev_err(&pdev->dev, "Unable to acquire dram-csi clock\n");
-               return PTR_ERR(sdev->clk_ram);
+       csi_dev->clock_ram = devm_clk_get(dev, "ram");
+       if (IS_ERR(csi_dev->clock_ram)) {
+               dev_err(dev, "failed to acquire ram clock\n");
+               return PTR_ERR(csi_dev->clock_ram);
        }
 
-       sdev->rstc_bus = devm_reset_control_get_shared(&pdev->dev, NULL);
-       if (IS_ERR(sdev->rstc_bus)) {
-               dev_err(&pdev->dev, "Cannot get reset controller\n");
-               return PTR_ERR(sdev->rstc_bus);
+       ret = clk_set_rate_exclusive(csi_dev->clock_mod,
+                                    variant->clock_mod_rate);
+       if (ret) {
+               dev_err(dev, "failed to set mod clock rate\n");
+               return ret;
+       }
+
+       /* Reset */
+
+       csi_dev->reset = devm_reset_control_get_shared(dev, NULL);
+       if (IS_ERR(csi_dev->reset)) {
+               dev_err(dev, "failed to acquire reset\n");
+               ret = PTR_ERR(csi_dev->reset);
+               goto error_clock_rate_exclusive;
        }
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
-               return -ENXIO;
+       /* Interrupt */
 
-       ret = devm_request_irq(&pdev->dev, irq, sun6i_csi_isr, 0, MODULE_NAME,
-                              sdev);
+       irq = platform_get_irq(platform_dev, 0);
+       if (irq < 0) {
+               dev_err(dev, "failed to get interrupt\n");
+               ret = -ENXIO;
+               goto error_clock_rate_exclusive;
+       }
+
+       ret = devm_request_irq(dev, irq, sun6i_csi_interrupt, 0, SUN6I_CSI_NAME,
+                              csi_dev);
        if (ret) {
-               dev_err(&pdev->dev, "Cannot request csi IRQ\n");
-               return ret;
+               dev_err(dev, "failed to request interrupt\n");
+               goto error_clock_rate_exclusive;
        }
 
+       /* Runtime PM */
+
+       pm_runtime_enable(dev);
+
        return 0;
+
+error_clock_rate_exclusive:
+       clk_rate_exclusive_put(csi_dev->clock_mod);
+
+       return ret;
+}
+
+static void sun6i_csi_resources_cleanup(struct sun6i_csi_device *csi_dev)
+{
+       pm_runtime_disable(csi_dev->dev);
+       clk_rate_exclusive_put(csi_dev->clock_mod);
 }
 
-static int sun6i_csi_probe(struct platform_device *pdev)
+static int sun6i_csi_probe(struct platform_device *platform_dev)
 {
-       struct sun6i_csi_dev *sdev;
+       struct sun6i_csi_device *csi_dev;
+       struct device *dev = &platform_dev->dev;
        int ret;
 
-       sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
-       if (!sdev)
+       csi_dev = devm_kzalloc(dev, sizeof(*csi_dev), GFP_KERNEL);
+       if (!csi_dev)
                return -ENOMEM;
 
-       sdev->dev = &pdev->dev;
+       csi_dev->dev = &platform_dev->dev;
+       platform_set_drvdata(platform_dev, csi_dev);
 
-       ret = sun6i_csi_resource_request(sdev, pdev);
+       ret = sun6i_csi_resources_setup(csi_dev, platform_dev);
        if (ret)
                return ret;
 
-       platform_set_drvdata(pdev, sdev);
+       ret = sun6i_csi_v4l2_setup(csi_dev);
+       if (ret)
+               goto error_resources;
+
+       return 0;
 
-       sdev->csi.dev = &pdev->dev;
-       return sun6i_csi_v4l2_init(&sdev->csi);
+error_resources:
+       sun6i_csi_resources_cleanup(csi_dev);
+
+       return ret;
 }
 
 static int sun6i_csi_remove(struct platform_device *pdev)
 {
-       struct sun6i_csi_dev *sdev = platform_get_drvdata(pdev);
+       struct sun6i_csi_device *csi_dev = platform_get_drvdata(pdev);
 
-       sun6i_csi_v4l2_cleanup(&sdev->csi);
+       sun6i_csi_v4l2_cleanup(csi_dev);
+       sun6i_csi_resources_cleanup(csi_dev);
 
        return 0;
 }
 
+static const struct sun6i_csi_variant sun6i_a31_csi_variant = {
+       .clock_mod_rate = 297000000,
+};
+
+static const struct sun6i_csi_variant sun50i_a64_csi_variant = {
+       .clock_mod_rate = 300000000,
+};
+
 static const struct of_device_id sun6i_csi_of_match[] = {
-       { .compatible = "allwinner,sun6i-a31-csi", },
-       { .compatible = "allwinner,sun8i-a83t-csi", },
-       { .compatible = "allwinner,sun8i-h3-csi", },
-       { .compatible = "allwinner,sun8i-v3s-csi", },
-       { .compatible = "allwinner,sun50i-a64-csi", },
+       {
+               .compatible     = "allwinner,sun6i-a31-csi",
+               .data           = &sun6i_a31_csi_variant,
+       },
+       {
+               .compatible     = "allwinner,sun8i-a83t-csi",
+               .data           = &sun6i_a31_csi_variant,
+       },
+       {
+               .compatible     = "allwinner,sun8i-h3-csi",
+               .data           = &sun6i_a31_csi_variant,
+       },
+       {
+               .compatible     = "allwinner,sun8i-v3s-csi",
+               .data           = &sun6i_a31_csi_variant,
+       },
+       {
+               .compatible     = "allwinner,sun50i-a64-csi",
+               .data           = &sun50i_a64_csi_variant,
+       },
        {},
 };
+
 MODULE_DEVICE_TABLE(of, sun6i_csi_of_match);
 
 static struct platform_driver sun6i_csi_platform_driver = {
-       .probe = sun6i_csi_probe,
-       .remove = sun6i_csi_remove,
-       .driver = {
-               .name = MODULE_NAME,
-               .of_match_table = of_match_ptr(sun6i_csi_of_match),
+       .probe  = sun6i_csi_probe,
+       .remove = sun6i_csi_remove,
+       .driver = {
+               .name           = SUN6I_CSI_NAME,
+               .of_match_table = of_match_ptr(sun6i_csi_of_match),
+               .pm             = &sun6i_csi_pm_ops,
        },
 };
+
 module_platform_driver(sun6i_csi_platform_driver);
 
-MODULE_DESCRIPTION("Allwinner V3s Camera Sensor Interface driver");
+MODULE_DESCRIPTION("Allwinner A31 Camera Sensor Interface driver");
 MODULE_AUTHOR("Yong Deng <yong.deng@magewell.com>");
 MODULE_LICENSE("GPL");
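
Taken together, the sun6i-csi changes move clock and reset handling into runtime-PM callbacks: sun6i_csi_resume() deasserts the reset and enables the module and ram clocks, sun6i_csi_suspend() reverses that, and sun6i_csi_set_power() shrinks to a pm_runtime_resume_and_get()/pm_runtime_put() pair around the CSI enable bit. A condensed sketch of that pairing (error handling trimmed; not a drop-in for the driver code above):

/*
 * Sketch of the runtime-PM pairing: the controller is powered up
 * lazily on first use and released when streaming stops. Assumes
 * <linux/pm_runtime.h> and <linux/regmap.h>.
 */
static int csi_enable(struct device *dev, struct regmap *regmap)
{
        int ret = pm_runtime_resume_and_get(dev); /* runs the resume callback */

        if (ret < 0)
                return ret;

        regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, CSI_EN_CSI_EN);

        return 0;
}

static void csi_disable(struct device *dev, struct regmap *regmap)
{
        regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0);
        pm_runtime_put(dev); /* may invoke the suspend callback */
}
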
index 3a38d10..bab7056 100644
@@ -8,13 +8,22 @@
 #ifndef __SUN6I_CSI_H__
 #define __SUN6I_CSI_H__
 
-#include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fwnode.h>
+#include <media/videobuf2-v4l2.h>
 
 #include "sun6i_video.h"
 
-struct sun6i_csi;
+#define SUN6I_CSI_NAME         "sun6i-csi"
+#define SUN6I_CSI_DESCRIPTION  "Allwinner A31 CSI Device"
+
+struct sun6i_csi_buffer {
+       struct vb2_v4l2_buffer          v4l2_buffer;
+       struct list_head                list;
+
+       dma_addr_t                      dma_addr;
+       bool                            queued_to_csi;
+};
 
 /**
  * struct sun6i_csi_config - configs for sun6i csi
@@ -32,59 +41,78 @@ struct sun6i_csi_config {
        u32             height;
 };
 
-struct sun6i_csi {
-       struct device                   *dev;
-       struct v4l2_ctrl_handler        ctrl_handler;
+struct sun6i_csi_v4l2 {
        struct v4l2_device              v4l2_dev;
        struct media_device             media_dev;
 
        struct v4l2_async_notifier      notifier;
-
        /* video port settings */
        struct v4l2_fwnode_endpoint     v4l2_ep;
+};
 
-       struct sun6i_csi_config         config;
+struct sun6i_csi_device {
+       struct device                   *dev;
 
+       struct sun6i_csi_config         config;
+       struct sun6i_csi_v4l2           v4l2;
        struct sun6i_video              video;
+
+       struct regmap                   *regmap;
+       struct clk                      *clock_mod;
+       struct clk                      *clock_ram;
+       struct reset_control            *reset;
+
+       int                             planar_offset[3];
+};
+
+struct sun6i_csi_variant {
+       unsigned long   clock_mod_rate;
 };
 
 /**
  * sun6i_csi_is_format_supported() - check if the format supported by csi
- * @csi:       pointer to the csi
+ * @csi_dev:   pointer to the csi device
  * @pixformat: v4l2 pixel format (V4L2_PIX_FMT_*)
  * @mbus_code: media bus format code (MEDIA_BUS_FMT_*)
+ *
+ * Return: true if format is supported, false otherwise.
  */
-bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat,
-                                  u32 mbus_code);
+bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev,
+                                  u32 pixformat, u32 mbus_code);
 
 /**
  * sun6i_csi_set_power() - power on/off the csi
- * @csi:       pointer to the csi
+ * @csi_dev:   pointer to the csi device
  * @enable:    on/off
+ *
+ * Return: 0 if successful, error code otherwise.
  */
-int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable);
+int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable);
 
 /**
  * sun6i_csi_update_config() - update the csi register settings
- * @csi:       pointer to the csi
+ * @csi_dev:   pointer to the csi device
  * @config:    see struct sun6i_csi_config
+ *
+ * Return: 0 if successful, error code otherwise.
  */
-int sun6i_csi_update_config(struct sun6i_csi *csi,
+int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev,
                            struct sun6i_csi_config *config);
 
 /**
  * sun6i_csi_update_buf_addr() - update the csi frame buffer address
- * @csi:       pointer to the csi
+ * @csi_dev:   pointer to the csi device
  * @addr:      frame buffer's physical address
  */
-void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr);
+void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev,
+                              dma_addr_t addr);
 
 /**
  * sun6i_csi_set_stream() - start/stop csi streaming
- * @csi:       pointer to the csi
+ * @csi_dev:   pointer to the csi device
  * @enable:    start/stop
  */
-void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable);
+void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable);
 
 /* get bpp form v4l2 pixformat */
 static inline int sun6i_csi_get_bpp(unsigned int pixformat)
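
The kernel-doc above spells out the capture-side contract of the reworked interface: check the format, power the controller, program the configuration and the buffer address, then toggle streaming. A sketch of that call order from a hypothetical start routine (error handling omitted for brevity):

/* Sketch: typical ordering of the sun6i_csi_* helpers documented above. */
static int csi_start_sketch(struct sun6i_csi_device *csi_dev,
                            struct sun6i_csi_config *cfg, dma_addr_t dma_addr)
{
        if (!sun6i_csi_is_format_supported(csi_dev, cfg->pixelformat,
                                           cfg->code))
                return -EINVAL;

        sun6i_csi_set_power(csi_dev, true);
        sun6i_csi_update_config(csi_dev, cfg);
        sun6i_csi_update_buf_addr(csi_dev, dma_addr);
        sun6i_csi_set_stream(csi_dev, true);

        return 0;
}
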
index 74d64a2..791583d 100644
 #define MAX_WIDTH      (4800)
 #define MAX_HEIGHT     (4800)
 
-struct sun6i_csi_buffer {
-       struct vb2_v4l2_buffer          vb;
-       struct list_head                list;
+/* Helpers */
 
-       dma_addr_t                      dma_addr;
-       bool                            queued_to_csi;
-};
+static struct v4l2_subdev *
+sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad)
+{
+       struct media_pad *remote;
+
+       remote = media_pad_remote_pad_first(&video->pad);
+
+       if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+               return NULL;
+
+       if (pad)
+               *pad = remote->index;
 
-static const u32 supported_pixformats[] = {
+       return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* Format */
+
+static const u32 sun6i_video_formats[] = {
        V4L2_PIX_FMT_SBGGR8,
        V4L2_PIX_FMT_SGBRG8,
        V4L2_PIX_FMT_SGRBG8,
@@ -61,119 +73,138 @@ static const u32 supported_pixformats[] = {
        V4L2_PIX_FMT_JPEG,
 };
 
-static bool is_pixformat_valid(unsigned int pixformat)
+static bool sun6i_video_format_check(u32 format)
 {
        unsigned int i;
 
-       for (i = 0; i < ARRAY_SIZE(supported_pixformats); i++)
-               if (supported_pixformats[i] == pixformat)
+       for (i = 0; i < ARRAY_SIZE(sun6i_video_formats); i++)
+               if (sun6i_video_formats[i] == format)
                        return true;
 
        return false;
 }
 
-static struct v4l2_subdev *
-sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad)
-{
-       struct media_pad *remote;
+/* Video */
 
-       remote = media_pad_remote_pad_first(&video->pad);
+static void sun6i_video_buffer_configure(struct sun6i_csi_device *csi_dev,
+                                        struct sun6i_csi_buffer *csi_buffer)
+{
+       csi_buffer->queued_to_csi = true;
+       sun6i_csi_update_buf_addr(csi_dev, csi_buffer->dma_addr);
+}
 
-       if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
-               return NULL;
+static void sun6i_video_configure(struct sun6i_csi_device *csi_dev)
+{
+       struct sun6i_video *video = &csi_dev->video;
+       struct sun6i_csi_config config = { 0 };
 
-       if (pad)
-               *pad = remote->index;
+       config.pixelformat = video->format.fmt.pix.pixelformat;
+       config.code = video->mbus_code;
+       config.field = video->format.fmt.pix.field;
+       config.width = video->format.fmt.pix.width;
+       config.height = video->format.fmt.pix.height;
 
-       return media_entity_to_v4l2_subdev(remote->entity);
+       sun6i_csi_update_config(csi_dev, &config);
 }
 
-static int sun6i_video_queue_setup(struct vb2_queue *vq,
-                                  unsigned int *nbuffers,
-                                  unsigned int *nplanes,
+/* Queue */
+
+static int sun6i_video_queue_setup(struct vb2_queue *queue,
+                                  unsigned int *buffers_count,
+                                  unsigned int *planes_count,
                                   unsigned int sizes[],
                                   struct device *alloc_devs[])
 {
-       struct sun6i_video *video = vb2_get_drv_priv(vq);
-       unsigned int size = video->fmt.fmt.pix.sizeimage;
+       struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue);
+       struct sun6i_video *video = &csi_dev->video;
+       unsigned int size = video->format.fmt.pix.sizeimage;
 
-       if (*nplanes)
+       if (*planes_count)
                return sizes[0] < size ? -EINVAL : 0;
 
-       *nplanes = 1;
+       *planes_count = 1;
        sizes[0] = size;
 
        return 0;
 }
 
-static int sun6i_video_buffer_prepare(struct vb2_buffer *vb)
+static int sun6i_video_buffer_prepare(struct vb2_buffer *buffer)
 {
-       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct sun6i_csi_buffer *buf =
-                       container_of(vbuf, struct sun6i_csi_buffer, vb);
-       struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue);
-       unsigned long size = video->fmt.fmt.pix.sizeimage;
-
-       if (vb2_plane_size(vb, 0) < size) {
-               v4l2_err(video->vdev.v4l2_dev, "buffer too small (%lu < %lu)\n",
-                        vb2_plane_size(vb, 0), size);
+       struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue);
+       struct sun6i_video *video = &csi_dev->video;
+       struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev;
+       struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer);
+       struct sun6i_csi_buffer *csi_buffer =
+               container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer);
+       unsigned long size = video->format.fmt.pix.sizeimage;
+
+       if (vb2_plane_size(buffer, 0) < size) {
+               v4l2_err(v4l2_dev, "buffer too small (%lu < %lu)\n",
+                        vb2_plane_size(buffer, 0), size);
                return -EINVAL;
        }
 
-       vb2_set_plane_payload(vb, 0, size);
-
-       buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+       vb2_set_plane_payload(buffer, 0, size);
 
-       vbuf->field = video->fmt.fmt.pix.field;
+       csi_buffer->dma_addr = vb2_dma_contig_plane_dma_addr(buffer, 0);
+       v4l2_buffer->field = video->format.fmt.pix.field;
 
        return 0;
 }
 
-static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+static void sun6i_video_buffer_queue(struct vb2_buffer *buffer)
+{
+       struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue);
+       struct sun6i_video *video = &csi_dev->video;
+       struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer);
+       struct sun6i_csi_buffer *csi_buffer =
+               container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer);
+       unsigned long flags;
+
+       spin_lock_irqsave(&video->dma_queue_lock, flags);
+       csi_buffer->queued_to_csi = false;
+       list_add_tail(&csi_buffer->list, &video->dma_queue);
+       spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+}
+
+static int sun6i_video_start_streaming(struct vb2_queue *queue,
+                                      unsigned int count)
 {
-       struct sun6i_video *video = vb2_get_drv_priv(vq);
+       struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue);
+       struct sun6i_video *video = &csi_dev->video;
+       struct video_device *video_dev = &video->video_dev;
        struct sun6i_csi_buffer *buf;
        struct sun6i_csi_buffer *next_buf;
-       struct sun6i_csi_config config;
        struct v4l2_subdev *subdev;
        unsigned long flags;
        int ret;
 
        video->sequence = 0;
 
-       ret = media_pipeline_start(&video->vdev.entity, &video->vdev.pipe);
+       ret = video_device_pipeline_alloc_start(video_dev);
        if (ret < 0)
-               goto clear_dma_queue;
+               goto error_dma_queue_flush;
 
        if (video->mbus_code == 0) {
                ret = -EINVAL;
-               goto stop_media_pipeline;
+               goto error_media_pipeline;
        }
 
        subdev = sun6i_video_remote_subdev(video, NULL);
        if (!subdev) {
                ret = -EINVAL;
-               goto stop_media_pipeline;
+               goto error_media_pipeline;
        }
 
-       config.pixelformat = video->fmt.fmt.pix.pixelformat;
-       config.code = video->mbus_code;
-       config.field = video->fmt.fmt.pix.field;
-       config.width = video->fmt.fmt.pix.width;
-       config.height = video->fmt.fmt.pix.height;
-
-       ret = sun6i_csi_update_config(video->csi, &config);
-       if (ret < 0)
-               goto stop_media_pipeline;
+       sun6i_video_configure(csi_dev);
 
        spin_lock_irqsave(&video->dma_queue_lock, flags);
 
        buf = list_first_entry(&video->dma_queue,
                               struct sun6i_csi_buffer, list);
-       buf->queued_to_csi = true;
-       sun6i_csi_update_buf_addr(video->csi, buf->dma_addr);
+       sun6i_video_buffer_configure(csi_dev, buf);
 
-       sun6i_csi_set_stream(video->csi, true);
+       sun6i_csi_set_stream(csi_dev, true);
 
        /*
         * CSI will lookup the next dma buffer for next frame before the
@@ -193,34 +224,37 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
         * would also drop frame when lacking of queued buffer.
         */
        next_buf = list_next_entry(buf, list);
-       next_buf->queued_to_csi = true;
-       sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
+       sun6i_video_buffer_configure(csi_dev, next_buf);
 
        spin_unlock_irqrestore(&video->dma_queue_lock, flags);
 
        ret = v4l2_subdev_call(subdev, video, s_stream, 1);
        if (ret && ret != -ENOIOCTLCMD)
-               goto stop_csi_stream;
+               goto error_stream;
 
        return 0;
 
-stop_csi_stream:
-       sun6i_csi_set_stream(video->csi, false);
-stop_media_pipeline:
-       media_pipeline_stop(&video->vdev.entity);
-clear_dma_queue:
+error_stream:
+       sun6i_csi_set_stream(csi_dev, false);
+
+error_media_pipeline:
+       video_device_pipeline_stop(video_dev);
+
+error_dma_queue_flush:
        spin_lock_irqsave(&video->dma_queue_lock, flags);
        list_for_each_entry(buf, &video->dma_queue, list)
-               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+               vb2_buffer_done(&buf->v4l2_buffer.vb2_buf,
+                               VB2_BUF_STATE_QUEUED);
        INIT_LIST_HEAD(&video->dma_queue);
        spin_unlock_irqrestore(&video->dma_queue_lock, flags);
 
        return ret;
 }
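
Editorial note, not part of the patch: the hunk above is one instance of a conversion repeated across this pull (sun6i-csi, cal, vimc, omap3isp, xilinx). Drivers stop passing a media entity to media_pipeline_start() and use the video_device_pipeline_*() helpers instead; where the driver keeps no state of its own on the pipeline (sun6i-csi, cal), the _alloc_start() variant also lets it drop its struct media_pipeline field. A minimal sketch with hypothetical names (struct my_dev, my_*_streaming), assuming only the in-tree helper signatures:

#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>

struct my_dev {
        struct video_device vdev;
        /* no driver-owned struct media_pipeline needed any more */
};

static int my_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct my_dev *dev = vb2_get_drv_priv(vq);
        int ret;

        /*
         * Old style:
         *      ret = media_pipeline_start(&dev->vdev.entity, &dev->pipe);
         * New style: the helper allocates and tracks the pipeline.
         */
        ret = video_device_pipeline_alloc_start(&dev->vdev);
        if (ret < 0)
                return ret;

        /* ... configure hardware, start the source subdev ... */

        return 0;
}

static void my_stop_streaming(struct vb2_queue *vq)
{
        struct my_dev *dev = vb2_get_drv_priv(vq);

        /* Replaces media_pipeline_stop(&dev->vdev.entity). */
        video_device_pipeline_stop(&dev->vdev);
}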
 
-static void sun6i_video_stop_streaming(struct vb2_queue *vq)
+static void sun6i_video_stop_streaming(struct vb2_queue *queue)
 {
-       struct sun6i_video *video = vb2_get_drv_priv(vq);
+       struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue);
+       struct sun6i_video *video = &csi_dev->video;
        struct v4l2_subdev *subdev;
        unsigned long flags;
        struct sun6i_csi_buffer *buf;
@@ -229,45 +263,32 @@ static void sun6i_video_stop_streaming(struct vb2_queue *vq)
        if (subdev)
                v4l2_subdev_call(subdev, video, s_stream, 0);
 
-       sun6i_csi_set_stream(video->csi, false);
+       sun6i_csi_set_stream(csi_dev, false);
 
-       media_pipeline_stop(&video->vdev.entity);
+       video_device_pipeline_stop(&video->video_dev);
 
        /* Release all active buffers */
        spin_lock_irqsave(&video->dma_queue_lock, flags);
        list_for_each_entry(buf, &video->dma_queue, list)
-               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&buf->v4l2_buffer.vb2_buf, VB2_BUF_STATE_ERROR);
        INIT_LIST_HEAD(&video->dma_queue);
        spin_unlock_irqrestore(&video->dma_queue_lock, flags);
 }
 
-static void sun6i_video_buffer_queue(struct vb2_buffer *vb)
-{
-       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct sun6i_csi_buffer *buf =
-                       container_of(vbuf, struct sun6i_csi_buffer, vb);
-       struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue);
-       unsigned long flags;
-
-       spin_lock_irqsave(&video->dma_queue_lock, flags);
-       buf->queued_to_csi = false;
-       list_add_tail(&buf->list, &video->dma_queue);
-       spin_unlock_irqrestore(&video->dma_queue_lock, flags);
-}
-
-void sun6i_video_frame_done(struct sun6i_video *video)
+void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev)
 {
+       struct sun6i_video *video = &csi_dev->video;
        struct sun6i_csi_buffer *buf;
        struct sun6i_csi_buffer *next_buf;
-       struct vb2_v4l2_buffer *vbuf;
+       struct vb2_v4l2_buffer *v4l2_buffer;
 
        spin_lock(&video->dma_queue_lock);
 
        buf = list_first_entry(&video->dma_queue,
                               struct sun6i_csi_buffer, list);
        if (list_is_last(&buf->list, &video->dma_queue)) {
-               dev_dbg(video->csi->dev, "Frame dropped!\n");
-               goto unlock;
+               dev_dbg(csi_dev->dev, "Frame dropped!\n");
+               goto complete;
        }
 
        next_buf = list_next_entry(buf, list);
@@ -277,200 +298,204 @@ void sun6i_video_frame_done(struct sun6i_video *video)
         * for next ISR call.
         */
        if (!next_buf->queued_to_csi) {
-               next_buf->queued_to_csi = true;
-               sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
-               dev_dbg(video->csi->dev, "Frame dropped!\n");
-               goto unlock;
+               sun6i_video_buffer_configure(csi_dev, next_buf);
+               dev_dbg(csi_dev->dev, "Frame dropped!\n");
+               goto complete;
        }
 
        list_del(&buf->list);
-       vbuf = &buf->vb;
-       vbuf->vb2_buf.timestamp = ktime_get_ns();
-       vbuf->sequence = video->sequence;
-       vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+       v4l2_buffer = &buf->v4l2_buffer;
+       v4l2_buffer->vb2_buf.timestamp = ktime_get_ns();
+       v4l2_buffer->sequence = video->sequence;
+       vb2_buffer_done(&v4l2_buffer->vb2_buf, VB2_BUF_STATE_DONE);
 
        /* Prepare buffer for next frame but one.  */
        if (!list_is_last(&next_buf->list, &video->dma_queue)) {
                next_buf = list_next_entry(next_buf, list);
-               next_buf->queued_to_csi = true;
-               sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
+               sun6i_video_buffer_configure(csi_dev, next_buf);
        } else {
-               dev_dbg(video->csi->dev, "Next frame will be dropped!\n");
+               dev_dbg(csi_dev->dev, "Next frame will be dropped!\n");
        }
 
-unlock:
+complete:
        video->sequence++;
        spin_unlock(&video->dma_queue_lock);
 }
 
-static const struct vb2_ops sun6i_csi_vb2_ops = {
+static const struct vb2_ops sun6i_video_queue_ops = {
        .queue_setup            = sun6i_video_queue_setup,
-       .wait_prepare           = vb2_ops_wait_prepare,
-       .wait_finish            = vb2_ops_wait_finish,
        .buf_prepare            = sun6i_video_buffer_prepare,
+       .buf_queue              = sun6i_video_buffer_queue,
        .start_streaming        = sun6i_video_start_streaming,
        .stop_streaming         = sun6i_video_stop_streaming,
-       .buf_queue              = sun6i_video_buffer_queue,
+       .wait_prepare           = vb2_ops_wait_prepare,
+       .wait_finish            = vb2_ops_wait_finish,
 };
 
-static int vidioc_querycap(struct file *file, void *priv,
-                          struct v4l2_capability *cap)
+/* V4L2 Device */
+
+static int sun6i_video_querycap(struct file *file, void *private,
+                               struct v4l2_capability *capability)
 {
-       struct sun6i_video *video = video_drvdata(file);
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct video_device *video_dev = &csi_dev->video.video_dev;
 
-       strscpy(cap->driver, "sun6i-video", sizeof(cap->driver));
-       strscpy(cap->card, video->vdev.name, sizeof(cap->card));
-       snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
-                video->csi->dev->of_node->name);
+       strscpy(capability->driver, SUN6I_CSI_NAME, sizeof(capability->driver));
+       strscpy(capability->card, video_dev->name, sizeof(capability->card));
+       snprintf(capability->bus_info, sizeof(capability->bus_info),
+                "platform:%s", dev_name(csi_dev->dev));
 
        return 0;
 }
 
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
-                                  struct v4l2_fmtdesc *f)
+static int sun6i_video_enum_fmt(struct file *file, void *private,
+                               struct v4l2_fmtdesc *fmtdesc)
 {
-       u32 index = f->index;
+       u32 index = fmtdesc->index;
 
-       if (index >= ARRAY_SIZE(supported_pixformats))
+       if (index >= ARRAY_SIZE(sun6i_video_formats))
                return -EINVAL;
 
-       f->pixelformat = supported_pixformats[index];
+       fmtdesc->pixelformat = sun6i_video_formats[index];
 
        return 0;
 }
 
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
-                               struct v4l2_format *fmt)
+static int sun6i_video_g_fmt(struct file *file, void *private,
+                            struct v4l2_format *format)
 {
-       struct sun6i_video *video = video_drvdata(file);
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct sun6i_video *video = &csi_dev->video;
 
-       *fmt = video->fmt;
+       *format = video->format;
 
        return 0;
 }
 
-static int sun6i_video_try_fmt(struct sun6i_video *video,
-                              struct v4l2_format *f)
+static int sun6i_video_format_try(struct sun6i_video *video,
+                                 struct v4l2_format *format)
 {
-       struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+       struct v4l2_pix_format *pix_format = &format->fmt.pix;
        int bpp;
 
-       if (!is_pixformat_valid(pixfmt->pixelformat))
-               pixfmt->pixelformat = supported_pixformats[0];
+       if (!sun6i_video_format_check(pix_format->pixelformat))
+               pix_format->pixelformat = sun6i_video_formats[0];
 
-       v4l_bound_align_image(&pixfmt->width, MIN_WIDTH, MAX_WIDTH, 1,
-                             &pixfmt->height, MIN_HEIGHT, MAX_WIDTH, 1, 1);
+       v4l_bound_align_image(&pix_format->width, MIN_WIDTH, MAX_WIDTH, 1,
+                             &pix_format->height, MIN_HEIGHT, MAX_WIDTH, 1, 1);
 
-       bpp = sun6i_csi_get_bpp(pixfmt->pixelformat);
-       pixfmt->bytesperline = (pixfmt->width * bpp) >> 3;
-       pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+       bpp = sun6i_csi_get_bpp(pix_format->pixelformat);
+       pix_format->bytesperline = (pix_format->width * bpp) >> 3;
+       pix_format->sizeimage = pix_format->bytesperline * pix_format->height;
 
-       if (pixfmt->field == V4L2_FIELD_ANY)
-               pixfmt->field = V4L2_FIELD_NONE;
+       if (pix_format->field == V4L2_FIELD_ANY)
+               pix_format->field = V4L2_FIELD_NONE;
 
-       if (pixfmt->pixelformat == V4L2_PIX_FMT_JPEG)
-               pixfmt->colorspace = V4L2_COLORSPACE_JPEG;
+       if (pix_format->pixelformat == V4L2_PIX_FMT_JPEG)
+               pix_format->colorspace = V4L2_COLORSPACE_JPEG;
        else
-               pixfmt->colorspace = V4L2_COLORSPACE_SRGB;
+               pix_format->colorspace = V4L2_COLORSPACE_SRGB;
 
-       pixfmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
-       pixfmt->quantization = V4L2_QUANTIZATION_DEFAULT;
-       pixfmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+       pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+       pix_format->quantization = V4L2_QUANTIZATION_DEFAULT;
+       pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
 
        return 0;
 }
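
Editorial example, not part of the patch: the arithmetic sun6i_video_format_try() applies to the driver's 1280x720 default (set further down in sun6i_video_setup()), assuming for illustration a packed 16 bpp format such as YUYV:

static unsigned int sun6i_format_math_example(void)
{
        unsigned int width = 1280, height = 720;
        unsigned int bpp = 16;                          /* e.g. YUYV */
        unsigned int bytesperline = (width * bpp) >> 3; /* 2560 bytes per line */

        return bytesperline * height;                   /* 1843200 bytes per frame */
}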
 
-static int sun6i_video_set_fmt(struct sun6i_video *video, struct v4l2_format *f)
+static int sun6i_video_format_set(struct sun6i_video *video,
+                                 struct v4l2_format *format)
 {
        int ret;
 
-       ret = sun6i_video_try_fmt(video, f);
+       ret = sun6i_video_format_try(video, format);
        if (ret)
                return ret;
 
-       video->fmt = *f;
+       video->format = *format;
 
        return 0;
 }
 
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
-                               struct v4l2_format *f)
+static int sun6i_video_s_fmt(struct file *file, void *private,
+                            struct v4l2_format *format)
 {
-       struct sun6i_video *video = video_drvdata(file);
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct sun6i_video *video = &csi_dev->video;
 
-       if (vb2_is_busy(&video->vb2_vidq))
+       if (vb2_is_busy(&video->queue))
                return -EBUSY;
 
-       return sun6i_video_set_fmt(video, f);
+       return sun6i_video_format_set(video, format);
 }
 
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
-                                 struct v4l2_format *f)
+static int sun6i_video_try_fmt(struct file *file, void *private,
+                              struct v4l2_format *format)
 {
-       struct sun6i_video *video = video_drvdata(file);
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct sun6i_video *video = &csi_dev->video;
 
-       return sun6i_video_try_fmt(video, f);
+       return sun6i_video_format_try(video, format);
 }
 
-static int vidioc_enum_input(struct file *file, void *fh,
-                            struct v4l2_input *inp)
+static int sun6i_video_enum_input(struct file *file, void *private,
+                                 struct v4l2_input *input)
 {
-       if (inp->index != 0)
+       if (input->index != 0)
                return -EINVAL;
 
-       strscpy(inp->name, "camera", sizeof(inp->name));
-       inp->type = V4L2_INPUT_TYPE_CAMERA;
+       input->type = V4L2_INPUT_TYPE_CAMERA;
+       strscpy(input->name, "Camera", sizeof(input->name));
 
        return 0;
 }
 
-static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
+static int sun6i_video_g_input(struct file *file, void *private,
+                              unsigned int *index)
 {
-       *i = 0;
+       *index = 0;
 
        return 0;
 }
 
-static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
+static int sun6i_video_s_input(struct file *file, void *private,
+                              unsigned int index)
 {
-       if (i != 0)
+       if (index != 0)
                return -EINVAL;
 
        return 0;
 }
 
 static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = {
-       .vidioc_querycap                = vidioc_querycap,
-       .vidioc_enum_fmt_vid_cap        = vidioc_enum_fmt_vid_cap,
-       .vidioc_g_fmt_vid_cap           = vidioc_g_fmt_vid_cap,
-       .vidioc_s_fmt_vid_cap           = vidioc_s_fmt_vid_cap,
-       .vidioc_try_fmt_vid_cap         = vidioc_try_fmt_vid_cap,
+       .vidioc_querycap                = sun6i_video_querycap,
+
+       .vidioc_enum_fmt_vid_cap        = sun6i_video_enum_fmt,
+       .vidioc_g_fmt_vid_cap           = sun6i_video_g_fmt,
+       .vidioc_s_fmt_vid_cap           = sun6i_video_s_fmt,
+       .vidioc_try_fmt_vid_cap         = sun6i_video_try_fmt,
 
-       .vidioc_enum_input              = vidioc_enum_input,
-       .vidioc_s_input                 = vidioc_s_input,
-       .vidioc_g_input                 = vidioc_g_input,
+       .vidioc_enum_input              = sun6i_video_enum_input,
+       .vidioc_g_input                 = sun6i_video_g_input,
+       .vidioc_s_input                 = sun6i_video_s_input,
 
+       .vidioc_create_bufs             = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf             = vb2_ioctl_prepare_buf,
        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
        .vidioc_querybuf                = vb2_ioctl_querybuf,
-       .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_expbuf                  = vb2_ioctl_expbuf,
+       .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
-       .vidioc_create_bufs             = vb2_ioctl_create_bufs,
-       .vidioc_prepare_buf             = vb2_ioctl_prepare_buf,
        .vidioc_streamon                = vb2_ioctl_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
-
-       .vidioc_log_status              = v4l2_ctrl_log_status,
-       .vidioc_subscribe_event         = v4l2_ctrl_subscribe_event,
-       .vidioc_unsubscribe_event       = v4l2_event_unsubscribe,
 };
 
-/* -----------------------------------------------------------------------------
- * V4L2 file operations
- */
+/* V4L2 File */
+
 static int sun6i_video_open(struct file *file)
 {
-       struct sun6i_video *video = video_drvdata(file);
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct sun6i_video *video = &csi_dev->video;
        int ret = 0;
 
        if (mutex_lock_interruptible(&video->lock))
@@ -478,45 +503,48 @@ static int sun6i_video_open(struct file *file)
 
        ret = v4l2_fh_open(file);
        if (ret < 0)
-               goto unlock;
+               goto error_lock;
 
-       ret = v4l2_pipeline_pm_get(&video->vdev.entity);
+       ret = v4l2_pipeline_pm_get(&video->video_dev.entity);
        if (ret < 0)
-               goto fh_release;
-
-       /* check if already powered */
-       if (!v4l2_fh_is_singular_file(file))
-               goto unlock;
+               goto error_v4l2_fh;
 
-       ret = sun6i_csi_set_power(video->csi, true);
-       if (ret < 0)
-               goto fh_release;
+       /* Power on at first open. */
+       if (v4l2_fh_is_singular_file(file)) {
+               ret = sun6i_csi_set_power(csi_dev, true);
+               if (ret < 0)
+                       goto error_v4l2_fh;
+       }
 
        mutex_unlock(&video->lock);
+
        return 0;
 
-fh_release:
+error_v4l2_fh:
        v4l2_fh_release(file);
-unlock:
+
+error_lock:
        mutex_unlock(&video->lock);
+
        return ret;
 }
 
 static int sun6i_video_close(struct file *file)
 {
-       struct sun6i_video *video = video_drvdata(file);
-       bool last_fh;
+       struct sun6i_csi_device *csi_dev = video_drvdata(file);
+       struct sun6i_video *video = &csi_dev->video;
+       bool last_close;
 
        mutex_lock(&video->lock);
 
-       last_fh = v4l2_fh_is_singular_file(file);
+       last_close = v4l2_fh_is_singular_file(file);
 
        _vb2_fop_release(file, NULL);
+       v4l2_pipeline_pm_put(&video->video_dev.entity);
 
-       v4l2_pipeline_pm_put(&video->vdev.entity);
-
-       if (last_fh)
-               sun6i_csi_set_power(video->csi, false);
+       /* Power off at last close. */
+       if (last_close)
+               sun6i_csi_set_power(csi_dev, false);
 
        mutex_unlock(&video->lock);
 
@@ -532,9 +560,8 @@ static const struct v4l2_file_operations sun6i_video_fops = {
        .poll           = vb2_fop_poll
 };
 
-/* -----------------------------------------------------------------------------
- * Media Operations
- */
+/* Media Entity */
+
 static int sun6i_video_link_validate_get_format(struct media_pad *pad,
                                                struct v4l2_subdev_format *fmt)
 {
@@ -554,15 +581,16 @@ static int sun6i_video_link_validate(struct media_link *link)
 {
        struct video_device *vdev = container_of(link->sink->entity,
                                                 struct video_device, entity);
-       struct sun6i_video *video = video_get_drvdata(vdev);
+       struct sun6i_csi_device *csi_dev = video_get_drvdata(vdev);
+       struct sun6i_video *video = &csi_dev->video;
        struct v4l2_subdev_format source_fmt;
        int ret;
 
        video->mbus_code = 0;
 
        if (!media_pad_remote_pad_first(link->sink->entity->pads)) {
-               dev_info(video->csi->dev,
-                        "video node %s pad not connected\n", vdev->name);
+               dev_info(csi_dev->dev, "video node %s pad not connected\n",
+                        vdev->name);
                return -ENOLINK;
        }
 
@@ -570,21 +598,21 @@ static int sun6i_video_link_validate(struct media_link *link)
        if (ret < 0)
                return ret;
 
-       if (!sun6i_csi_is_format_supported(video->csi,
-                                          video->fmt.fmt.pix.pixelformat,
+       if (!sun6i_csi_is_format_supported(csi_dev,
+                                          video->format.fmt.pix.pixelformat,
                                           source_fmt.format.code)) {
-               dev_err(video->csi->dev,
+               dev_err(csi_dev->dev,
                        "Unsupported pixformat: 0x%x with mbus code: 0x%x!\n",
-                       video->fmt.fmt.pix.pixelformat,
+                       video->format.fmt.pix.pixelformat,
                        source_fmt.format.code);
                return -EPIPE;
        }
 
-       if (source_fmt.format.width != video->fmt.fmt.pix.width ||
-           source_fmt.format.height != video->fmt.fmt.pix.height) {
-               dev_err(video->csi->dev,
+       if (source_fmt.format.width != video->format.fmt.pix.width ||
+           source_fmt.format.height != video->format.fmt.pix.height) {
+               dev_err(csi_dev->dev,
                        "Wrong width or height %ux%u (%ux%u expected)\n",
-                       video->fmt.fmt.pix.width, video->fmt.fmt.pix.height,
+                       video->format.fmt.pix.width, video->format.fmt.pix.height,
                        source_fmt.format.width, source_fmt.format.height);
                return -EPIPE;
        }
@@ -598,88 +626,108 @@ static const struct media_entity_operations sun6i_video_media_ops = {
        .link_validate = sun6i_video_link_validate
 };
 
-int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
-                    const char *name)
+/* Video */
+
+int sun6i_video_setup(struct sun6i_csi_device *csi_dev)
 {
-       struct video_device *vdev = &video->vdev;
-       struct vb2_queue *vidq = &video->vb2_vidq;
-       struct v4l2_format fmt = { 0 };
+       struct sun6i_video *video = &csi_dev->video;
+       struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev;
+       struct video_device *video_dev = &video->video_dev;
+       struct vb2_queue *queue = &video->queue;
+       struct media_pad *pad = &video->pad;
+       struct v4l2_format format = { 0 };
+       struct v4l2_pix_format *pix_format = &format.fmt.pix;
        int ret;
 
-       video->csi = csi;
+       /* Media Entity */
 
-       /* Initialize the media entity... */
-       video->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
-       vdev->entity.ops = &sun6i_video_media_ops;
-       ret = media_entity_pads_init(&vdev->entity, 1, &video->pad);
+       video_dev->entity.ops = &sun6i_video_media_ops;
+
+       /* Media Pad */
+
+       pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+
+       ret = media_entity_pads_init(&video_dev->entity, 1, pad);
        if (ret < 0)
                return ret;
 
-       mutex_init(&video->lock);
+       /* DMA queue */
 
        INIT_LIST_HEAD(&video->dma_queue);
        spin_lock_init(&video->dma_queue_lock);
 
        video->sequence = 0;
 
-       /* Setup default format */
-       fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-       fmt.fmt.pix.pixelformat = supported_pixformats[0];
-       fmt.fmt.pix.width = 1280;
-       fmt.fmt.pix.height = 720;
-       fmt.fmt.pix.field = V4L2_FIELD_NONE;
-       sun6i_video_set_fmt(video, &fmt);
-
-       /* Initialize videobuf2 queue */
-       vidq->type                      = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-       vidq->io_modes                  = VB2_MMAP | VB2_DMABUF;
-       vidq->drv_priv                  = video;
-       vidq->buf_struct_size           = sizeof(struct sun6i_csi_buffer);
-       vidq->ops                       = &sun6i_csi_vb2_ops;
-       vidq->mem_ops                   = &vb2_dma_contig_memops;
-       vidq->timestamp_flags           = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-       vidq->lock                      = &video->lock;
-       /* Make sure non-dropped frame */
-       vidq->min_buffers_needed        = 3;
-       vidq->dev                       = csi->dev;
-
-       ret = vb2_queue_init(vidq);
+       /* Queue */
+
+       mutex_init(&video->lock);
+
+       queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       queue->io_modes = VB2_MMAP | VB2_DMABUF;
+       queue->buf_struct_size = sizeof(struct sun6i_csi_buffer);
+       queue->ops = &sun6i_video_queue_ops;
+       queue->mem_ops = &vb2_dma_contig_memops;
+       queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       queue->lock = &video->lock;
+       queue->dev = csi_dev->dev;
+       queue->drv_priv = csi_dev;
+
+       /* Make sure frames are not dropped. */
+       queue->min_buffers_needed = 3;
+
+       ret = vb2_queue_init(queue);
        if (ret) {
-               v4l2_err(&csi->v4l2_dev, "vb2_queue_init failed: %d\n", ret);
-               goto clean_entity;
+               v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret);
+               goto error_media_entity;
        }
 
-       /* Register video device */
-       strscpy(vdev->name, name, sizeof(vdev->name));
-       vdev->release           = video_device_release_empty;
-       vdev->fops              = &sun6i_video_fops;
-       vdev->ioctl_ops         = &sun6i_video_ioctl_ops;
-       vdev->vfl_type          = VFL_TYPE_VIDEO;
-       vdev->vfl_dir           = VFL_DIR_RX;
-       vdev->v4l2_dev          = &csi->v4l2_dev;
-       vdev->queue             = vidq;
-       vdev->lock              = &video->lock;
-       vdev->device_caps       = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
-       video_set_drvdata(vdev, video);
-
-       ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+       /* V4L2 Format */
+
+       format.type = queue->type;
+       pix_format->pixelformat = sun6i_video_formats[0];
+       pix_format->width = 1280;
+       pix_format->height = 720;
+       pix_format->field = V4L2_FIELD_NONE;
+
+       sun6i_video_format_set(video, &format);
+
+       /* Video Device */
+
+       strscpy(video_dev->name, SUN6I_CSI_NAME, sizeof(video_dev->name));
+       video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       video_dev->vfl_dir = VFL_DIR_RX;
+       video_dev->release = video_device_release_empty;
+       video_dev->fops = &sun6i_video_fops;
+       video_dev->ioctl_ops = &sun6i_video_ioctl_ops;
+       video_dev->v4l2_dev = v4l2_dev;
+       video_dev->queue = queue;
+       video_dev->lock = &video->lock;
+
+       video_set_drvdata(video_dev, csi_dev);
+
+       ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1);
        if (ret < 0) {
-               v4l2_err(&csi->v4l2_dev,
-                        "video_register_device failed: %d\n", ret);
-               goto clean_entity;
+               v4l2_err(v4l2_dev, "failed to register video device: %d\n",
+                        ret);
+               goto error_media_entity;
        }
 
        return 0;
 
-clean_entity:
-       media_entity_cleanup(&video->vdev.entity);
+error_media_entity:
+       media_entity_cleanup(&video_dev->entity);
+
        mutex_destroy(&video->lock);
+
        return ret;
 }
 
-void sun6i_video_cleanup(struct sun6i_video *video)
+void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev)
 {
-       vb2_video_unregister_device(&video->vdev);
-       media_entity_cleanup(&video->vdev.entity);
+       struct sun6i_video *video = &csi_dev->video;
+       struct video_device *video_dev = &video->video_dev;
+
+       vb2_video_unregister_device(video_dev);
+       media_entity_cleanup(&video_dev->entity);
        mutex_destroy(&video->lock);
 }
index b9cd919..a917d2d 100644 (file)
 #include <media/v4l2-dev.h>
 #include <media/videobuf2-core.h>
 
-struct sun6i_csi;
+struct sun6i_csi_device;
 
 struct sun6i_video {
-       struct video_device             vdev;
+       struct video_device             video_dev;
+       struct vb2_queue                queue;
+       struct mutex                    lock; /* Queue lock. */
        struct media_pad                pad;
-       struct sun6i_csi                *csi;
 
-       struct mutex                    lock;
-
-       struct vb2_queue                vb2_vidq;
-       spinlock_t                      dma_queue_lock;
        struct list_head                dma_queue;
+       spinlock_t                      dma_queue_lock; /* DMA queue lock. */
 
-       unsigned int                    sequence;
-       struct v4l2_format              fmt;
+       struct v4l2_format              format;
        u32                             mbus_code;
+       unsigned int                    sequence;
 };
 
-int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
-                    const char *name);
-void sun6i_video_cleanup(struct sun6i_video *video);
+int sun6i_video_setup(struct sun6i_csi_device *csi_dev);
+void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev);
 
-void sun6i_video_frame_done(struct sun6i_video *video);
+void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev);
 
 #endif /* __SUN6I_VIDEO_H__ */
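
Editorial sketch, not part of the patch: the header change above reflects the ownership model the refactor settles on. struct sun6i_video is embedded in the parent device and loses its back-pointer, so callers are handed the parent and reach the video state as a member; only per-buffer objects still navigate upward with container_of(), as in sun6i_video_buffer_prepare() earlier. The same shape with hypothetical names:

#include <linux/container_of.h>

struct child_state {
        unsigned int sequence;
        /* no back-pointer to the parent any more */
};

struct parent_device {
        struct child_state child;       /* embedded, not a pointer */
};

/* Downward: callers hold the parent and take the member's address. */
static struct child_state *parent_to_child(struct parent_device *parent)
{
        return &parent->child;
}

/* Upward, only where unavoidable (e.g. from an embedded buffer). */
static struct parent_device *child_to_parent(struct child_state *child)
{
        return container_of(child, struct parent_device, child);
}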
index eb98246..08852f6 100644 (file)
@@ -3,11 +3,11 @@ config VIDEO_SUN6I_MIPI_CSI2
        tristate "Allwinner A31 MIPI CSI-2 Controller Driver"
        depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
        depends on ARCH_SUNXI || COMPILE_TEST
-       depends on PM && COMMON_CLK
+       depends on PM && COMMON_CLK && RESET_CONTROLLER
+       depends on PHY_SUN6I_MIPI_DPHY
        select MEDIA_CONTROLLER
        select VIDEO_V4L2_SUBDEV_API
        select V4L2_FWNODE
-       select PHY_SUN6I_MIPI_DPHY
        select GENERIC_PHY_MIPI_DPHY
        select REGMAP_MMIO
        help
index a4e3f9a..30d6c0c 100644 (file)
@@ -661,7 +661,8 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
        csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
        if (IS_ERR(csi2_dev->reset)) {
                dev_err(dev, "failed to get reset controller\n");
-               return PTR_ERR(csi2_dev->reset);
+               ret = PTR_ERR(csi2_dev->reset);
+               goto error_clock_rate_exclusive;
        }
 
        /* D-PHY */
@@ -669,13 +670,14 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
        csi2_dev->dphy = devm_phy_get(dev, "dphy");
        if (IS_ERR(csi2_dev->dphy)) {
                dev_err(dev, "failed to get MIPI D-PHY\n");
-               return PTR_ERR(csi2_dev->dphy);
+               ret = PTR_ERR(csi2_dev->dphy);
+               goto error_clock_rate_exclusive;
        }
 
        ret = phy_init(csi2_dev->dphy);
        if (ret) {
                dev_err(dev, "failed to initialize MIPI D-PHY\n");
-               return ret;
+               goto error_clock_rate_exclusive;
        }
 
        /* Runtime PM */
@@ -683,6 +685,11 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
        pm_runtime_enable(dev);
 
        return 0;
+
+error_clock_rate_exclusive:
+       clk_rate_exclusive_put(csi2_dev->clock_mod);
+
+       return ret;
 }
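
Editorial sketch, not part of the patch: the error-path hunks above (and the matching sun8i ones below) converge on a single unwind label so that the exclusive clock rate claimed earlier in the function, outside this hunk, is always released on failure. The acquire/unwind shape, with hypothetical stub helpers standing in for clk_rate_exclusive_get()/put(), devm_reset_control_get_shared(), phy_init() and friends:

#include <linux/errno.h>

static int claim_rate(void) { return 0; }        /* hypothetical stub */
static int get_phy(void) { return -ENODEV; }     /* hypothetical stub */
static void release_rate(void) { }               /* hypothetical stub */

static int resources_setup_sketch(void)
{
        int ret;

        ret = claim_rate();
        if (ret)
                return ret;              /* nothing claimed yet, plain return */

        ret = get_phy();
        if (ret)
                goto error_release_rate; /* claimed state must be unwound */

        return 0;

error_release_rate:
        release_rate();

        return ret;
}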
 
 static void
@@ -712,9 +719,14 @@ static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev)
 
        ret = sun6i_mipi_csi2_bridge_setup(csi2_dev);
        if (ret)
-               return ret;
+               goto error_resources;
 
        return 0;
+
+error_resources:
+       sun6i_mipi_csi2_resources_cleanup(csi2_dev);
+
+       return ret;
 }
 
 static int sun6i_mipi_csi2_remove(struct platform_device *platform_dev)
index 789d58e..47a8c0f 100644 (file)
@@ -3,7 +3,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
        tristate "Allwinner A83T MIPI CSI-2 Controller and D-PHY Driver"
        depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
        depends on ARCH_SUNXI || COMPILE_TEST
-       depends on PM && COMMON_CLK
+       depends on PM && COMMON_CLK && RESET_CONTROLLER
        select MEDIA_CONTROLLER
        select VIDEO_V4L2_SUBDEV_API
        select V4L2_FWNODE
index d052ee7..b032ec1 100644 (file)
@@ -719,13 +719,15 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de
        csi2_dev->clock_mipi = devm_clk_get(dev, "mipi");
        if (IS_ERR(csi2_dev->clock_mipi)) {
                dev_err(dev, "failed to acquire mipi clock\n");
-               return PTR_ERR(csi2_dev->clock_mipi);
+               ret = PTR_ERR(csi2_dev->clock_mipi);
+               goto error_clock_rate_exclusive;
        }
 
        csi2_dev->clock_misc = devm_clk_get(dev, "misc");
        if (IS_ERR(csi2_dev->clock_misc)) {
                dev_err(dev, "failed to acquire misc clock\n");
-               return PTR_ERR(csi2_dev->clock_misc);
+               ret = PTR_ERR(csi2_dev->clock_misc);
+               goto error_clock_rate_exclusive;
        }
 
        /* Reset */
@@ -733,7 +735,8 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de
        csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
        if (IS_ERR(csi2_dev->reset)) {
                dev_err(dev, "failed to get reset controller\n");
-               return PTR_ERR(csi2_dev->reset);
+               ret = PTR_ERR(csi2_dev->reset);
+               goto error_clock_rate_exclusive;
        }
 
        /* D-PHY */
@@ -741,7 +744,7 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de
        ret = sun8i_a83t_dphy_register(csi2_dev);
        if (ret) {
                dev_err(dev, "failed to initialize MIPI D-PHY\n");
-               return ret;
+               goto error_clock_rate_exclusive;
        }
 
        /* Runtime PM */
@@ -749,6 +752,11 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de
        pm_runtime_enable(dev);
 
        return 0;
+
+error_clock_rate_exclusive:
+       clk_rate_exclusive_put(csi2_dev->clock_mod);
+
+       return ret;
 }
 
 static void
@@ -778,9 +786,14 @@ static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev)
 
        ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev);
        if (ret)
-               return ret;
+               goto error_resources;
 
        return 0;
+
+error_resources:
+       sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
+
+       return ret;
 }
 
 static int sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev)
index ff71e06..f688396 100644 (file)
@@ -4,7 +4,7 @@ config VIDEO_SUN8I_DEINTERLACE
        depends on V4L_MEM2MEM_DRIVERS
        depends on VIDEO_DEV
        depends on ARCH_SUNXI || COMPILE_TEST
-       depends on COMMON_CLK && OF
+       depends on COMMON_CLK && RESET_CONTROLLER && OF
        depends on PM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index cfba290..ee2c1f2 100644 (file)
@@ -5,7 +5,7 @@ config VIDEO_SUN8I_ROTATE
        depends on V4L_MEM2MEM_DRIVERS
        depends on VIDEO_DEV
        depends on ARCH_SUNXI || COMPILE_TEST
-       depends on COMMON_CLK && OF
+       depends on COMMON_CLK && RESET_CONTROLLER && OF
        depends on PM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index 21e3d0a..4eade40 100644 (file)
@@ -708,7 +708,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
        dma_addr_t addr;
        int ret;
 
-       ret = media_pipeline_start(&ctx->vdev.entity, &ctx->phy->pipe);
+       ret = video_device_pipeline_alloc_start(&ctx->vdev);
        if (ret < 0) {
                ctx_err(ctx, "Failed to start media pipeline: %d\n", ret);
                goto error_release_buffers;
@@ -761,7 +761,7 @@ error_stop:
        cal_ctx_unprepare(ctx);
 
 error_pipeline:
-       media_pipeline_stop(&ctx->vdev.entity);
+       video_device_pipeline_stop(&ctx->vdev);
 error_release_buffers:
        cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED);
 
@@ -782,7 +782,7 @@ static void cal_stop_streaming(struct vb2_queue *vq)
 
        cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
 
-       media_pipeline_stop(&ctx->vdev.entity);
+       video_device_pipeline_stop(&ctx->vdev);
 }
 
 static const struct vb2_ops cal_video_qops = {
index 80f2c9c..de73d6d 100644 (file)
@@ -174,7 +174,6 @@ struct cal_camerarx {
        struct device_node      *source_ep_node;
        struct device_node      *source_node;
        struct v4l2_subdev      *source;
-       struct media_pipeline   pipe;
 
        struct v4l2_subdev      subdev;
        struct media_pad        pads[CAL_CAMERARX_NUM_PADS];
index a6052df..24d2383 100644 (file)
@@ -937,10 +937,8 @@ static int isp_pipeline_is_last(struct media_entity *me)
        struct isp_pipeline *pipe;
        struct media_pad *pad;
 
-       if (!me->pipe)
-               return 0;
        pipe = to_isp_pipeline(me);
-       if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
+       if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
                return 0;
        pad = media_pad_remote_pad_first(&pipe->output->pad);
        return pad->entity == me;
index cc9a97d..3e5348c 100644 (file)
@@ -1093,8 +1093,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        /* Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         */
-       pipe = video->video.entity.pipe
-            ? to_isp_pipeline(&video->video.entity) : &video->pipe;
+       pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe;
 
        ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
        if (ret)
@@ -1104,7 +1103,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
        pipe->max_rate = pipe->l3_ick;
 
-       ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
+       ret = video_device_pipeline_start(&video->video, &pipe->pipe);
        if (ret < 0)
                goto err_pipeline_start;
 
@@ -1161,7 +1160,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        return 0;
 
 err_check_format:
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
 err_pipeline_start:
        /* TODO: Implement PM QoS */
        /* The DMA queue must be emptied here, otherwise CCDC interrupts that
@@ -1228,7 +1227,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        video->error = false;
 
        /* TODO: Implement PM QoS */
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
 
        media_entity_enum_cleanup(&pipe->ent_enum);
 
index a090867..1d23df5 100644 (file)
@@ -99,8 +99,15 @@ struct isp_pipeline {
        unsigned int external_width;
 };
 
-#define to_isp_pipeline(__e) \
-       container_of((__e)->pipe, struct isp_pipeline, pipe)
+static inline struct isp_pipeline *to_isp_pipeline(struct media_entity *entity)
+{
+       struct media_pipeline *pipe = media_entity_pipeline(entity);
+
+       if (!pipe)
+               return NULL;
+
+       return container_of(pipe, struct isp_pipeline, pipe);
+}
 
 static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
 {
index 2036f72..8cb4a68 100644 (file)
@@ -251,6 +251,11 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
 
 static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
 {
+       struct hantro_ctx *ctx;
+
+       ctx = container_of(ctrl->handler,
+                          struct hantro_ctx, ctrl_handler);
+
        if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
                const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
 
@@ -266,12 +271,11 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
        } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
                const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
 
-               if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
-                       /* Luma and chroma bit depth mismatch */
-                       return -EINVAL;
-               if (sps->bit_depth_luma_minus8 != 0)
-                       /* Only 8-bit is supported */
+               if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
+                       /* Only 8-bit and 10-bit are supported */
                        return -EINVAL;
+
+               ctx->bit_depth = sps->bit_depth_luma_minus8 + 8;
        } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
                const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
 
index 233ecd8..a9d4ac8 100644 (file)
@@ -12,7 +12,7 @@
 
 static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx)
 {
-       return ctx->dst_fmt.width * ctx->dst_fmt.height;
+       return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
 }
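
Editorial example, not part of the patch: with 10-bit HEVC streams the chroma plane no longer starts at width * height bytes, since each stored luma sample now occupies bit_depth / 8 bytes. For an illustrative 1920x1080 frame:

#include <linux/types.h>

static size_t hevc_chroma_offset_example(void)
{
        size_t width = 1920, height = 1080;

        /*  8-bit: 1920 * 1080 *  8 / 8 = 2073600 bytes
         * 10-bit: 1920 * 1080 * 10 / 8 = 2592000 bytes
         */
        return width * height * 10 / 8;
}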
 
 static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx)
@@ -167,8 +167,6 @@ static void set_params(struct hantro_ctx *ctx)
        hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8);
        hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8);
 
-       hantro_reg_write(vpu, &g2_output_8_bits, 0);
-
        hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx));
 
        min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
index b990bc9..9383fb7 100644 (file)
@@ -104,7 +104,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx)
                hevc_dec->tile_bsd.cpu = NULL;
        }
 
-       size = VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1);
+       size = (VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
        hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
                                                       &hevc_dec->tile_filter.dma,
                                                       GFP_KERNEL);
@@ -112,7 +112,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx)
                goto err_free_tile_buffers;
        hevc_dec->tile_filter.size = size;
 
-       size = VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1);
+       size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
        hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
                                                    &hevc_dec->tile_sao.dma,
                                                    GFP_KERNEL);
index a0928c5..09d8cf9 100644 (file)
@@ -114,6 +114,7 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
        struct hantro_dev *vpu = ctx->dev;
        struct vb2_v4l2_buffer *dst_buf;
        int down_scale = down_scale_factor(ctx);
+       int out_depth;
        size_t chroma_offset;
        dma_addr_t dst_dma;
 
@@ -132,8 +133,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
                hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
                hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
        }
+
+       out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
        if (ctx->dev->variant->legacy_regs) {
-               int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
                u8 pp_shift = 0;
 
                if (out_depth > 8)
@@ -141,6 +143,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
 
                hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth);
                hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
+       } else {
+               hantro_reg_write(vpu, &g2_output_8_bits, out_depth > 8 ? 0 : 1);
+               hantro_reg_write(vpu, &g2_output_format, out_depth > 8 ? 1 : 0);
        }
        hantro_reg_write(vpu, &g2_out_rs_e, 1);
 }
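
Editorial note, not part of the patch: on cores without legacy registers the post-processor output registers are now derived from the destination format depth as well (hantro_get_format_depth() is assumed here to report 8 for NV12 and 10 for P010). The mapping added above, isolated for illustration:

static void g2_output_regs_example(int out_depth,
                                   int *output_8_bits, int *output_format)
{
        /*  8-bit destination (e.g. NV12): 8_bits = 1, format = 0.
         * 10-bit destination (e.g. P010): 8_bits = 0, format = 1.
         */
        *output_8_bits = out_depth > 8 ? 0 : 1;
        *output_format = out_depth > 8 ? 1 : 0;
}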
index 77f574f..b390228 100644 (file)
@@ -162,12 +162,39 @@ static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
                        .step_height = MB_DIM,
                },
        },
+       {
+               .fourcc = V4L2_PIX_FMT_P010,
+               .codec_mode = HANTRO_MODE_NONE,
+               .postprocessed = true,
+               .frmsize = {
+                       .min_width = FMT_MIN_WIDTH,
+                       .max_width = FMT_UHD_WIDTH,
+                       .step_width = MB_DIM,
+                       .min_height = FMT_MIN_HEIGHT,
+                       .max_height = FMT_UHD_HEIGHT,
+                       .step_height = MB_DIM,
+               },
+       },
 };
 
 static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
        {
                .fourcc = V4L2_PIX_FMT_NV12_4L4,
                .codec_mode = HANTRO_MODE_NONE,
+               .match_depth = true,
+               .frmsize = {
+                       .min_width = FMT_MIN_WIDTH,
+                       .max_width = FMT_UHD_WIDTH,
+                       .step_width = TILE_MB_DIM,
+                       .min_height = FMT_MIN_HEIGHT,
+                       .max_height = FMT_UHD_HEIGHT,
+                       .step_height = TILE_MB_DIM,
+               },
+       },
+       {
+               .fourcc = V4L2_PIX_FMT_P010_4L4,
+               .codec_mode = HANTRO_MODE_NONE,
+               .match_depth = true,
                .frmsize = {
                        .min_width = FMT_MIN_WIDTH,
                        .max_width = FMT_UHD_WIDTH,
index 2d1ef7a..0a7fd86 100644 (file)
@@ -402,10 +402,9 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
         * Use the pipeline object embedded in the first DMA object that starts
         * streaming.
         */
-       pipe = dma->video.entity.pipe
-            ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+       pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe;
 
-       ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+       ret = video_device_pipeline_start(&dma->video, &pipe->pipe);
        if (ret < 0)
                goto error;
 
@@ -431,7 +430,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
        return 0;
 
 error_stop:
-       media_pipeline_stop(&dma->video.entity);
+       video_device_pipeline_stop(&dma->video);
 
 error:
        /* Give back all queued buffers to videobuf2. */
@@ -448,7 +447,7 @@ error:
 static void xvip_dma_stop_streaming(struct vb2_queue *vq)
 {
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
-       struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+       struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video);
        struct xvip_dma_buffer *buf, *nbuf;
 
        /* Stop the pipeline. */
@@ -459,7 +458,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
 
        /* Cleanup the pipeline and mark it as being stopped. */
        xvip_pipeline_cleanup(pipe);
-       media_pipeline_stop(&dma->video.entity);
+       video_device_pipeline_stop(&dma->video);
 
        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
index 2378bda..9c6d4c1 100644 (file)
@@ -45,9 +45,14 @@ struct xvip_pipeline {
        struct xvip_dma *output;
 };
 
-static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+static inline struct xvip_pipeline *to_xvip_pipeline(struct video_device *vdev)
 {
-       return container_of(e->pipe, struct xvip_pipeline, pipe);
+       struct media_pipeline *pipe = video_device_pipeline(vdev);
+
+       if (!pipe)
+               return NULL;
+
+       return container_of(pipe, struct xvip_pipeline, pipe);
 }
 
 /**
index 0bf99e1..171f9cc 100644 (file)
@@ -1072,7 +1072,6 @@ done:
 
 static int si476x_radio_fops_release(struct file *file)
 {
-       int err;
        struct si476x_radio *radio = video_drvdata(file);
 
        if (v4l2_fh_is_singular_file(file) &&
@@ -1080,9 +1079,7 @@ static int si476x_radio_fops_release(struct file *file)
                si476x_core_set_power_state(radio->core,
                                            SI476X_POWER_DOWN);
 
-       err = v4l2_fh_release(file);
-
-       return err;
+       return v4l2_fh_release(file);
 }
 
 static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf,
index 2aec642..93d847c 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
index 735b925..5edfd8a 100644 (file)
@@ -684,7 +684,6 @@ static int send_packet(struct imon_context *ictx)
  */
 static int send_associate_24g(struct imon_context *ictx)
 {
-       int retval;
        const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00,
                                          0x00, 0x00, 0x00, 0x20 };
 
@@ -699,9 +698,8 @@ static int send_associate_24g(struct imon_context *ictx)
        }
 
        memcpy(ictx->usb_tx_buf, packet, sizeof(packet));
-       retval = send_packet(ictx);
 
-       return retval;
+       return send_packet(ictx);
 }
 
 /*
index 39d2b03..c76ba24 100644 (file)
@@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
        struct mceusb_dev *ir = dev->priv;
        unsigned int units;
 
-       units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT);
+       units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);
 
        cmdbuf[2] = units >> 8;
        cmdbuf[3] = units;
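
Editorial example, not part of the patch: the rounding change matters because a timeout converted with DIV_ROUND_CLOSEST can come out shorter than what userspace asked for, while DIV_ROUND_UP never can. Assuming, purely for illustration, a unit of 50 in whatever scale the request uses:

#include <linux/math.h>

static unsigned int timeout_units_example(void)
{
        unsigned int unit = 50;         /* illustrative unit size */
        unsigned int timeout = 110;     /* requested, same scale  */

        /* DIV_ROUND_CLOSEST(110, 50) = 2 -> 100, shorter than requested.
         * DIV_ROUND_UP(110, 50)      = 3 -> 150, never shorter.
         */
        return DIV_ROUND_UP(timeout, unit);
}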
index 6c43780..aa94427 100644 (file)
@@ -241,13 +241,12 @@ static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture
 static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count)
 {
        struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);
-       struct media_entity *entity = &vcapture->vdev.entity;
        int ret;
 
        vcapture->sequence = 0;
 
        /* Start the media pipeline */
-       ret = media_pipeline_start(entity, &vcapture->stream.pipe);
+       ret = video_device_pipeline_start(&vcapture->vdev, &vcapture->stream.pipe);
        if (ret) {
                vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
                return ret;
@@ -255,7 +254,7 @@ static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count
 
        ret = vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 1);
        if (ret) {
-               media_pipeline_stop(entity);
+               video_device_pipeline_stop(&vcapture->vdev);
                vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
                return ret;
        }
@@ -274,7 +273,7 @@ static void vimc_capture_stop_streaming(struct vb2_queue *vq)
        vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 0);
 
        /* Stop the media pipeline */
-       media_pipeline_stop(&vcapture->vdev.entity);
+       video_device_pipeline_stop(&vcapture->vdev);
 
        /* Release all active buffers */
        vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_ERROR);
index 04b7566..f28440e 100644 (file)
@@ -339,6 +339,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a
        return vivid_vid_out_g_fbuf(file, fh, a);
 }
 
+/*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+bool vivid_validate_fb(const struct v4l2_framebuffer *a)
+{
+       struct vivid_dev *dev;
+       int i;
+
+       for (i = 0; i < n_devs; i++) {
+               dev = vivid_devs[i];
+               if (!dev || !dev->video_pbase)
+                       continue;
+               if ((unsigned long)a->base == dev->video_pbase &&
+                   a->fmt.width <= dev->display_width &&
+                   a->fmt.height <= dev->display_height &&
+                   a->fmt.bytesperline <= dev->display_byte_stride)
+                       return true;
+       }
+       return false;
+}
+
 static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
 {
        struct video_device *vdev = video_devdata(file);
@@ -920,8 +942,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
 
        /* how many inputs do we have and of what type? */
        dev->num_inputs = num_inputs[inst];
-       if (dev->num_inputs < 1)
-               dev->num_inputs = 1;
+       if (node_type & 0x20007) {
+               if (dev->num_inputs < 1)
+                       dev->num_inputs = 1;
+       } else {
+               dev->num_inputs = 0;
+       }
        if (dev->num_inputs >= MAX_INPUTS)
                dev->num_inputs = MAX_INPUTS;
        for (i = 0; i < dev->num_inputs; i++) {
@@ -938,8 +964,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
 
        /* how many outputs do we have and of what type? */
        dev->num_outputs = num_outputs[inst];
-       if (dev->num_outputs < 1)
-               dev->num_outputs = 1;
+       if (node_type & 0x40300) {
+               if (dev->num_outputs < 1)
+                       dev->num_outputs = 1;
+       } else {
+               dev->num_outputs = 0;
+       }
        if (dev->num_outputs >= MAX_OUTPUTS)
                dev->num_outputs = MAX_OUTPUTS;
        for (i = 0; i < dev->num_outputs; i++) {
index bfcfb35..473f359 100644 (file)
@@ -613,4 +613,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
        return dev->output_type[dev->output] == HDMI;
 }
 
+bool vivid_validate_fb(const struct v4l2_framebuffer *a);
+
 #endif
index fbaec8a..ec25edc 100644 (file)
@@ -357,7 +357,7 @@ int vivid_fb_init(struct vivid_dev *dev)
        int ret;
 
        dev->video_buffer_size = MAX_OSD_HEIGHT * MAX_OSD_WIDTH * 2;
-       dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL | GFP_DMA32);
+       dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL);
        if (dev->video_vbase == NULL)
                return -ENOMEM;
        dev->video_pbase = virt_to_phys(dev->video_vbase);
index 86b158e..11620ea 100644 (file)
@@ -453,6 +453,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
        tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
        dev->crop_cap = dev->src_rect;
        dev->crop_bounds_cap = dev->src_rect;
+       if (dev->bitmap_cap &&
+           (dev->compose_cap.width != dev->crop_cap.width ||
+            dev->compose_cap.height != dev->crop_cap.height)) {
+               vfree(dev->bitmap_cap);
+               dev->bitmap_cap = NULL;
+       }
        dev->compose_cap = dev->crop_cap;
        if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
                dev->compose_cap.height /= 2;
@@ -460,6 +466,14 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
        tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
        tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
        tpg_update_mv_step(&dev->tpg);
+
+       /*
+        * We can be called from within s_ctrl, in which case we can't
+        * modify controls. Luckily we don't need to in that case.
+        */
+       if (keep_controls)
+               return;
+
        dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
        dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
        v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
@@ -913,6 +927,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_rect *crop = &dev->crop_cap;
        struct v4l2_rect *compose = &dev->compose_cap;
+       unsigned orig_compose_w = compose->width;
+       unsigned orig_compose_h = compose->height;
        unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
        int ret;
 
@@ -1029,17 +1045,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
                        s->r.height /= factor;
                }
                v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
-               if (dev->bitmap_cap && (compose->width != s->r.width ||
-                                       compose->height != s->r.height)) {
-                       vfree(dev->bitmap_cap);
-                       dev->bitmap_cap = NULL;
-               }
                *compose = s->r;
                break;
        default:
                return -EINVAL;
        }
 
+       if (dev->bitmap_cap && (compose->width != orig_compose_w ||
+                               compose->height != orig_compose_h)) {
+               vfree(dev->bitmap_cap);
+               dev->bitmap_cap = NULL;
+       }
        tpg_s_crop_compose(&dev->tpg, crop, compose);
        return 0;
 }
@@ -1276,7 +1292,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
                return -EINVAL;
        if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
                return -EINVAL;
-       if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+       if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
+               return -EINVAL;
+
+       /*
+        * Only support the framebuffer of one of the vivid instances.
+        * Anything else is rejected.
+        */
+       if (!vivid_validate_fb(a))
                return -EINVAL;
 
        dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
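
The reworked check above compares bytesperline against sizeimage / height instead of multiplying height by bytesperline, so an oversized framebuffer description can no longer wrap the 32-bit product and slip past validation. A self-contained sketch of the same overflow-safe comparison (plain C; the field names are illustrative, not the real V4L2 structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe form of "bytesperline * height <= sizeimage": for non-zero
 * height, bytesperline <= sizeimage / height is mathematically equivalent
 * but never overflows a 32-bit intermediate. */
static bool fb_geometry_ok(uint32_t bytesperline, uint32_t height,
			   uint32_t sizeimage)
{
	if (height == 0)
		return false;
	return bytesperline <= sizeimage / height;
}

int main(void)
{
	/* 0x20000 * 0x20000 wraps to 0 in 32 bits, so the naive product
	 * would wrongly accept this bogus geometry. */
	printf("%d\n", fb_geometry_ok(0x20000, 0x20000, 4096));	/* 0 */
	printf("%d\n", fb_geometry_ok(2 * 1920, 1080, 2 * 1920 * 1080));	/* 1 */
	return 0;
}
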
index a04dfd5..d59b4ab 100644 (file)
@@ -282,15 +282,13 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe)
 static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData)
 {
        u8 buf[4];
-       int result;
 
        buf[0] = (regAddr >> 8) & 0xFF;
        buf[1] = regAddr & 0xFF;
        buf[2] = (i2cData >> 8) & 0xFF;
        buf[3] = i2cData & 0xFF;
-       result = xc_send_i2c_data(priv, buf, 4);
 
-       return result;
+       return xc_send_i2c_data(priv, buf, 4);
 }
 
 static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
index caefac0..877e85a 100644 (file)
@@ -410,7 +410,7 @@ static int au0828_enable_source(struct media_entity *entity,
                goto end;
        }
 
-       ret = __media_pipeline_start(entity, pipe);
+       ret = __media_pipeline_start(entity->pads, pipe);
        if (ret) {
                pr_err("Start Pipeline: %s->%s Error %d\n",
                        source->name, entity->name, ret);
@@ -501,12 +501,12 @@ static void au0828_disable_source(struct media_entity *entity)
                                return;
 
                        /* stop pipeline */
-                       __media_pipeline_stop(dev->active_link_owner);
+                       __media_pipeline_stop(dev->active_link_owner->pads);
                        pr_debug("Pipeline stop for %s\n",
                                dev->active_link_owner->name);
 
                        ret = __media_pipeline_start(
-                                       dev->active_link_user,
+                                       dev->active_link_user->pads,
                                        dev->active_link_user_pipe);
                        if (ret) {
                                pr_err("Start Pipeline: %s->%s %d\n",
@@ -532,7 +532,7 @@ static void au0828_disable_source(struct media_entity *entity)
                        return;
 
                /* stop pipeline */
-               __media_pipeline_stop(dev->active_link_owner);
+               __media_pipeline_stop(dev->active_link_owner->pads);
                pr_debug("Pipeline stop for %s\n",
                        dev->active_link_owner->name);
 
index 5eef37b..1e9c8d0 100644 (file)
@@ -1497,7 +1497,7 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
                /*
                 * AF9035 gpiot2 = FC0012 enable
                 * XXX: there seems to be something on gpioh8 too, but on my
-                * my test I didn't find any difference.
+                * test I didn't find any difference.
                 */
 
                if (adap->id == 0) {
index 5a1f269..9759996 100644 (file)
@@ -209,7 +209,7 @@ leave:
  *
  * Control bits for previous samples is 32-bit field, containing 16 x 2-bit
  * numbers. This results one 2-bit number for 8 samples. It is likely used for
- * for bit shifting sample by given bits, increasing actual sampling resolution.
+ * bit shifting sample by given bits, increasing actual sampling resolution.
  * Number 2 (0b10) was never seen.
  *
  * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
index a8c354a..d0a3aa3 100644 (file)
@@ -89,7 +89,7 @@ static int req_to_user(struct v4l2_ext_control *c,
 /* Helper function: copy the initial control value back to the caller */
 static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
 {
-       ctrl->type_ops->init(ctrl, 0, ctrl->elems, ctrl->p_new);
+       ctrl->type_ops->init(ctrl, 0, ctrl->p_new);
 
        return ptr_to_user(c, ctrl, ctrl->p_new);
 }
@@ -126,7 +126,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
                if (ctrl->is_dyn_array)
                        ctrl->new_elems = elems;
                else if (ctrl->is_array)
-                       ctrl->type_ops->init(ctrl, elems, ctrl->elems, ctrl->p_new);
+                       ctrl->type_ops->init(ctrl, elems, ctrl->p_new);
                return 0;
        }
 
@@ -494,7 +494,7 @@ EXPORT_SYMBOL(v4l2_g_ext_ctrls);
 /* Validate a new control */
 static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
 {
-       return ctrl->type_ops->validate(ctrl, ctrl->new_elems, p_new);
+       return ctrl->type_ops->validate(ctrl, p_new);
 }
 
 /* Validate controls. */
@@ -1007,7 +1007,7 @@ int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
        ctrl->p_cur.p = p_array + elems * ctrl->elem_size;
        for (i = 0; i < ctrl->nr_of_dims; i++)
                ctrl->dims[i] = dims[i];
-       ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur);
+       ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
        cur_to_new(ctrl);
        send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE |
                               V4L2_EVENT_CTRL_CH_DIMENSIONS);
index 01f0009..0dab1d7 100644 (file)
@@ -65,7 +65,7 @@ void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
                        v4l2_event_queue_fh(sev->fh, &ev);
 }
 
-bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
                             union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
 {
        unsigned int i;
@@ -74,7 +74,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
        case V4L2_CTRL_TYPE_BUTTON:
                return false;
        case V4L2_CTRL_TYPE_STRING:
-               for (i = 0; i < elems; i++) {
+               for (i = 0; i < ctrl->elems; i++) {
                        unsigned int idx = i * ctrl->elem_size;
 
                        /* strings are always 0-terminated */
@@ -84,7 +84,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
                return true;
        default:
                return !memcmp(ptr1.p_const, ptr2.p_const,
-                              elems * ctrl->elem_size);
+                              ctrl->elems * ctrl->elem_size);
        }
 }
 EXPORT_SYMBOL(v4l2_ctrl_type_op_equal);
@@ -178,9 +178,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
 }
 
 void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
-                           u32 tot_elems, union v4l2_ctrl_ptr ptr)
+                           union v4l2_ctrl_ptr ptr)
 {
        unsigned int i;
+       u32 tot_elems = ctrl->elems;
        u32 elems = tot_elems - from_idx;
 
        if (from_idx >= tot_elems)
@@ -995,7 +996,7 @@ static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx,
        }
 }
 
-int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl,
                               union v4l2_ctrl_ptr ptr)
 {
        unsigned int i;
@@ -1017,11 +1018,11 @@ int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
 
        case V4L2_CTRL_TYPE_BUTTON:
        case V4L2_CTRL_TYPE_CTRL_CLASS:
-               memset(ptr.p_s32, 0, elems * sizeof(s32));
+               memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32));
                return 0;
        }
 
-       for (i = 0; !ret && i < elems; i++)
+       for (i = 0; !ret && i < ctrl->new_elems; i++)
                ret = std_validate_elem(ctrl, i, ptr);
        return ret;
 }
@@ -1724,7 +1725,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
                memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
        }
 
-       ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur);
+       ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
        cur_to_new(ctrl);
 
        if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
@@ -2069,7 +2070,7 @@ static int cluster_changed(struct v4l2_ctrl *master)
                        ctrl_changed = true;
                if (!ctrl_changed)
                        ctrl_changed = !ctrl->type_ops->equal(ctrl,
-                               ctrl->elems, ctrl->p_cur, ctrl->p_new);
+                               ctrl->p_cur, ctrl->p_new);
                ctrl->has_changed = ctrl_changed;
                changed |= ctrl->has_changed;
        }
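
The hunks above drop the element-count argument from the v4l2_ctrl_type_ops callbacks; the operations now read ctrl->elems (or ctrl->new_elems for validation) themselves. A hedged sketch of what a driver-defined type_ops might look like against the new prototypes (the my_* names are hypothetical and .log reuses the stock helper; this is not code from the patch):

static bool my_ctrl_equal(const struct v4l2_ctrl *ctrl,
			  union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
{
	/* the element count now comes from the control itself */
	return !memcmp(ptr1.p_const, ptr2.p_const,
		       ctrl->elems * ctrl->elem_size);
}

static void my_ctrl_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
			 union v4l2_ctrl_ptr ptr)
{
	if (from_idx < ctrl->elems)
		memset(ptr.p + from_idx * ctrl->elem_size, 0,
		       (ctrl->elems - from_idx) * ctrl->elem_size);
}

static int my_ctrl_validate(const struct v4l2_ctrl *ctrl,
			    union v4l2_ctrl_ptr ptr)
{
	return 0;	/* this sketch accepts any value */
}

static const struct v4l2_ctrl_type_ops my_ctrl_type_ops = {
	.equal    = my_ctrl_equal,
	.init     = my_ctrl_init,
	.log      = v4l2_ctrl_type_op_log,
	.validate = my_ctrl_validate,
};
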
index d00237e..397d553 100644 (file)
@@ -1095,6 +1095,78 @@ void video_unregister_device(struct video_device *vdev)
 }
 EXPORT_SYMBOL(video_unregister_device);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+                                            struct media_pipeline *pipe)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (entity->num_pads != 1)
+               return -ENODEV;
+
+       return media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_start);
+
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+                                              struct media_pipeline *pipe)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (entity->num_pads != 1)
+               return -ENODEV;
+
+       return __media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_start);
+
+void video_device_pipeline_stop(struct video_device *vdev)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (WARN_ON(entity->num_pads != 1))
+               return;
+
+       return media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_stop);
+
+void __video_device_pipeline_stop(struct video_device *vdev)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (WARN_ON(entity->num_pads != 1))
+               return;
+
+       return __media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_stop);
+
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (entity->num_pads != 1)
+               return -ENODEV;
+
+       return media_pipeline_alloc_start(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start);
+
+struct media_pipeline *video_device_pipeline(struct video_device *vdev)
+{
+       struct media_entity *entity = &vdev->entity;
+
+       if (WARN_ON(entity->num_pads != 1))
+               return NULL;
+
+       return media_pad_pipeline(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline);
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
 /*
  *     Initialise video for linux
  */
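
The new helpers above let a video-device-centric driver start and stop a pipeline without reaching into media_entity internals, provided the device exposes exactly one pad. A hedged usage sketch (struct my_drv, my_drv_hw_start and the error handling are hypothetical, not taken from any driver in this merge):

static int my_drv_start_streaming(struct my_drv *drv)
{
	int ret;

	ret = video_device_pipeline_start(&drv->vdev, &drv->pipe);
	if (ret)
		return ret;

	ret = my_drv_hw_start(drv);		/* hypothetical hardware start */
	if (ret)
		video_device_pipeline_stop(&drv->vdev);

	return ret;
}
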
index af48705..003c32f 100644 (file)
@@ -161,6 +161,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
            (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
            (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
                return false;
+
+       /* sanity checks for the blanking timings */
+       if (!bt->interlaced &&
+           (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+               return false;
+       if (bt->hfrontporch > 2 * bt->width ||
+           bt->hsync > 1024 || bt->hbackporch > 1024)
+               return false;
+       if (bt->vfrontporch > 4096 ||
+           bt->vsync > 128 || bt->vbackporch > 4096)
+               return false;
+       if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
+           bt->il_vsync > 128 || bt->il_vbackporch > 4096))
+               return false;
        return fnc == NULL || fnc(t, fnc_handle);
 }
 EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
index 9489e80..bdb2ce7 100644 (file)
@@ -66,6 +66,14 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
                goto err_map;
        }
 
+       /* Parse the device's DT node for an endianness specification */
+       if (of_property_read_bool(np, "big-endian"))
+               syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
+       else if (of_property_read_bool(np, "little-endian"))
+               syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
+       else if (of_property_read_bool(np, "native-endian"))
+               syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE;
+
        /*
         * search for reg-io-width property in DT. If it is not provided,
         * default to 4 bytes. regmap_init_mmio will return an error if values
index 9afda47..6706ef3 100644 (file)
@@ -152,7 +152,7 @@ static int gru_assign_asid(struct gru_state *gru)
  * Optionally, build an array of chars that contain the bit numbers allocated.
  */
 static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
-                                      char *idx)
+                                      signed char *idx)
 {
        unsigned long bits = 0;
        int i;
@@ -170,14 +170,14 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
 }
 
 unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
-                                      char *cbmap)
+                                      signed char *cbmap)
 {
        return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
                                 cbmap);
 }
 
 unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
-                                      char *dsmap)
+                                      signed char *dsmap)
 {
        return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
                                 dsmap);
index 5efc869..8c52776 100644 (file)
@@ -351,7 +351,7 @@ struct gru_thread_state {
        pid_t                   ts_tgid_owner;  /* task that is using the
                                                   context - for migration */
        short                   ts_user_blade_id;/* user selected blade */
-       char                    ts_user_chiplet_id;/* user selected chiplet */
+       signed char             ts_user_chiplet_id;/* user selected chiplet */
        unsigned short          ts_sizeavail;   /* Pagesizes in use */
        int                     ts_tsid;        /* thread that owns the
                                                   structure */
@@ -364,11 +364,11 @@ struct gru_thread_state {
                                                   required for contest */
        unsigned char           ts_cbr_au_count;/* Number of CBR resources
                                                   required for contest */
-       char                    ts_cch_req_slice;/* CCH packet slice */
-       char                    ts_blade;       /* If >= 0, migrate context if
+       signed char             ts_cch_req_slice;/* CCH packet slice */
+       signed char             ts_blade;       /* If >= 0, migrate context if
                                                   ref from different blade */
-       char                    ts_force_cch_reload;
-       char                    ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
+       signed char             ts_force_cch_reload;
+       signed char             ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
                                                          allocated CB */
        int                     ts_data_valid;  /* Indicates if ts_gdata has
                                                   valid data */
@@ -643,9 +643,9 @@ extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
                int cbr_au_count, int dsr_au_count,
                unsigned char tlb_preload_count, int options, int tsid);
 extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
-               int cbr_au_count, char *cbmap);
+               int cbr_au_count, signed char *cbmap);
 extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
-               int dsr_au_count, char *dsmap);
+               int dsr_au_count, signed char *dsmap);
 extern vm_fault_t gru_fault(struct vm_fault *vmf);
 extern struct gru_mm_struct *gru_register_mmu_notifier(void);
 extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
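
The char → signed char conversions above matter because the signedness of plain char is implementation-defined: it is signed on x86 but unsigned on arm, arm64 and s390, so a negative sentinel (as in ts_blade, documented above as "If >= 0, migrate context") would never satisfy a "< 0" test there. A self-contained illustration; the output depends on the target ABI:

#include <stdio.h>

int main(void)
{
	char        maybe_unsigned = -1;	/* stored as 255 where char is unsigned */
	signed char always_signed  = -1;	/* negative on every architecture */

	printf("plain char  < 0: %s\n", maybe_unsigned < 0 ? "yes" : "no");
	printf("signed char < 0: %s\n", always_signed  < 0 ? "yes" : "no");
	return 0;
}
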
index 54cd009..db6d8a0 100644 (file)
@@ -134,6 +134,7 @@ struct mmc_blk_data {
         * track of the current selected device partition.
         */
        unsigned int    part_curr;
+#define MMC_BLK_PART_INVALID   UINT_MAX        /* Unknown partition active */
        int     area_type;
 
        /* debugfs files (only in main mmc_blk_data) */
@@ -987,33 +988,39 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
        return ms;
 }
 
+/*
+ * Attempts to reset the card and get back to the requested partition.
+ * Therefore any error here must result in cancelling the block layer
+ * request, it must not be reattempted without going through the mmc_blk
+ * partition sanity checks.
+ */
 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                         int type)
 {
        int err;
+       struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);
 
        if (md->reset_done & type)
                return -EEXIST;
 
        md->reset_done |= type;
        err = mmc_hw_reset(host->card);
+       /*
+        * A successful reset will leave the card in the main partition, but
+        * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
+        * in that case.
+        */
+       main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
+       if (err)
+               return err;
        /* Ensure we switch back to the correct partition */
-       if (err) {
-               struct mmc_blk_data *main_md =
-                       dev_get_drvdata(&host->card->dev);
-               int part_err;
-
-               main_md->part_curr = main_md->part_type;
-               part_err = mmc_blk_part_switch(host->card, md->part_type);
-               if (part_err) {
-                       /*
-                        * We have failed to get back into the correct
-                        * partition, so we need to abort the whole request.
-                        */
-                       return -ENODEV;
-               }
-       }
-       return err;
+       if (mmc_blk_part_switch(host->card, md->part_type))
+               /*
+                * We have failed to get back into the correct
+                * partition, so we need to abort the whole request.
+                */
+               return -ENODEV;
+       return 0;
 }
 
 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
@@ -1871,8 +1878,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
                return;
 
        /* Reset before last retry */
-       if (mqrq->retries + 1 == MMC_MAX_RETRIES)
-               mmc_blk_reset(md, card->host, type);
+       if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
+           mmc_blk_reset(md, card->host, type))
+               return;
 
        /* Command errors fail fast, so use all MMC_MAX_RETRIES */
        if (brq->sbc.error || brq->cmd.error)
index fefaa90..b396e39 100644 (file)
@@ -48,6 +48,7 @@ static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
        case REQ_OP_DRV_OUT:
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
+       case REQ_OP_WRITE_ZEROES:
                return MMC_ISSUE_SYNC;
        case REQ_OP_FLUSH:
                return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
@@ -493,6 +494,13 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
        if (blk_queue_quiesced(q))
                blk_mq_unquiesce_queue(q);
 
+       /*
+        * If the recovery completes the last (and only remaining) request in
+        * the queue, and the card has been removed, we could end up here with
+        * the recovery not quite finished yet, so cancel it.
+        */
+       cancel_work_sync(&mq->recovery_work);
+
        blk_mq_free_tag_set(&mq->tag_set);
 
        /*
index c6268c3..babf21a 100644 (file)
@@ -291,7 +291,8 @@ static void sdio_release_func(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
 
-       sdio_free_func_cis(func);
+       if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+               sdio_free_func_cis(func);
 
        kfree(func->info);
        kfree(func->tmpbuf);
index f324daa..fb1062a 100644 (file)
@@ -1075,9 +1075,10 @@ config MMC_SDHCI_OMAP
 
 config MMC_SDHCI_AM654
        tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
-       depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
+       depends on MMC_SDHCI_PLTFM && OF
        select MMC_SDHCI_IO_ACCESSORS
        select MMC_CQHCI
+       select REGMAP_MMIO
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in TI's AM654 SOCs. The controller supports
index 55981b0..747df79 100644 (file)
@@ -1660,6 +1660,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
        }
 
+       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+       if (err)
+               goto disable_ahb_clk;
+
        if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
                sdhci_esdhc_ops.platform_execute_tuning =
                                        esdhc_executing_tuning;
@@ -1667,13 +1671,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
        if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
                host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-       if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+       if (host->caps & MMC_CAP_8_BIT_DATA &&
+           imx_data->socdata->flags & ESDHC_FLAG_HS400)
                host->mmc->caps2 |= MMC_CAP2_HS400;
 
        if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
                host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
 
-       if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+       if (host->caps & MMC_CAP_8_BIT_DATA &&
+           imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
                host->mmc->caps2 |= MMC_CAP2_HS400_ES;
                host->mmc_host_ops.hs400_enhanced_strobe =
                                        esdhc_hs400_enhanced_strobe;
@@ -1695,10 +1701,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                        goto disable_ahb_clk;
        }
 
-       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
-       if (err)
-               goto disable_ahb_clk;
-
        sdhci_esdhc_imx_hwinit(host);
 
        err = sdhci_add_host(host);
index 169b847..34ea1ac 100644 (file)
@@ -914,6 +914,12 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
                dmi_match(DMI_SYS_VENDOR, "IRBIS"));
 }
 
+static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
+{
+       return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
+                       dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
+}
+
 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
        int ret = byt_emmc_probe_slot(slot);
@@ -922,9 +928,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
                slot->host->mmc->caps2 |= MMC_CAP2_CQE;
 
        if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
-               slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
-               slot->host->mmc_host_ops.hs400_enhanced_strobe =
-                                               intel_hs400_enhanced_strobe;
+               if (!jsl_broken_hs400es(slot)) {
+                       slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+                       slot->host->mmc_host_ops.hs400_enhanced_strobe =
+                                                       intel_hs400_enhanced_strobe;
+               }
                slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
        }
 
index 18aa544..0b4ca0a 100644 (file)
@@ -562,7 +562,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
        if (!mtd_is_partition(mtd))
                return;
        parent = mtd->parent;
-       parent_dn = dev_of_node(&parent->dev);
+       parent_dn = of_node_get(dev_of_node(&parent->dev));
        if (!parent_dn)
                return;
 
index d4a0987..6f4cea8 100644 (file)
@@ -608,11 +608,12 @@ static int ebu_nand_probe(struct platform_device *pdev)
        ret = of_property_read_u32(chip_np, "reg", &cs);
        if (ret) {
                dev_err(dev, "failed to get chip select: %d\n", ret);
-               return ret;
+               goto err_of_node_put;
        }
        if (cs >= MAX_CS) {
                dev_err(dev, "got invalid chip select: %d\n", cs);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_of_node_put;
        }
 
        ebu_host->cs_num = cs;
@@ -620,18 +621,22 @@ static int ebu_nand_probe(struct platform_device *pdev)
        resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
        ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
                                                                          resname);
-       if (IS_ERR(ebu_host->cs[cs].chipaddr))
-               return PTR_ERR(ebu_host->cs[cs].chipaddr);
+       if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+               ret = PTR_ERR(ebu_host->cs[cs].chipaddr);
+               goto err_of_node_put;
+       }
 
        ebu_host->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(ebu_host->clk))
-               return dev_err_probe(dev, PTR_ERR(ebu_host->clk),
-                                    "failed to get clock\n");
+       if (IS_ERR(ebu_host->clk)) {
+               ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
+                                   "failed to get clock\n");
+               goto err_of_node_put;
+       }
 
        ret = clk_prepare_enable(ebu_host->clk);
        if (ret) {
                dev_err(dev, "failed to enable clock: %d\n", ret);
-               return ret;
+               goto err_of_node_put;
        }
 
        ebu_host->dma_tx = dma_request_chan(dev, "tx");
@@ -695,6 +700,8 @@ err_cleanup_dma:
        ebu_dma_cleanup(ebu_host);
 err_disable_unprepare_clk:
        clk_disable_unprepare(ebu_host->clk);
+err_of_node_put:
+       of_node_put(chip_np);
 
        return ret;
 }
index d9f2f1d..b9d1e96 100644 (file)
@@ -2678,7 +2678,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
        chip->controller = &nfc->controller;
        nand_set_flash_node(chip, np);
 
-       if (!of_property_read_bool(np, "marvell,nand-keep-config"))
+       if (of_property_read_bool(np, "marvell,nand-keep-config"))
                chip->options |= NAND_KEEP_TIMINGS;
 
        mtd = nand_to_mtd(chip);
index e12f9f5..a9b9031 100644 (file)
@@ -1181,7 +1181,7 @@ static int tegra_nand_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        err = pm_runtime_resume_and_get(&pdev->dev);
        if (err)
-               return err;
+               goto err_dis_pm;
 
        err = reset_control_reset(rst);
        if (err) {
@@ -1215,6 +1215,8 @@ static int tegra_nand_probe(struct platform_device *pdev)
 err_put_pm:
        pm_runtime_put_sync_suspend(ctrl->dev);
        pm_runtime_force_suspend(ctrl->dev);
+err_dis_pm:
+       pm_runtime_disable(&pdev->dev);
        return err;
 }
 
index 50fcf4c..13daf9b 100644 (file)
@@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read middle of the block */
-               err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+               err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
                               (uint8_t *)buf);
                if (err && !mtd_is_bitflip(err)) {
                        pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
-                              offset + 0x8000, err);
+                              offset + (blocksize / 2), err);
                        continue;
                }
 
index f2c6400..bee8fc4 100644 (file)
@@ -2724,7 +2724,9 @@ static int spi_nor_init(struct spi_nor *nor)
                 */
                WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
                          "enabling reset hack; may not recover from unexpected reboots\n");
-               return nor->params->set_4byte_addr_mode(nor, true);
+               err = nor->params->set_4byte_addr_mode(nor, true);
+               if (err && err != -ENOTSUPP)
+                       return err;
        }
 
        return 0;
index c469b2f..b0ed798 100644 (file)
@@ -322,14 +322,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
                                               &mscan_clksrc);
        if (!priv->can.clock.freq) {
                dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
-               goto exit_free_mscan;
+               goto exit_put_clock;
        }
 
        err = register_mscandev(dev, mscan_clksrc);
        if (err) {
                dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
                        DRV_NAME, err);
-               goto exit_free_mscan;
+               goto exit_put_clock;
        }
 
        dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -337,7 +337,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 
        return 0;
 
-exit_free_mscan:
+exit_put_clock:
+       if (data->put_clock)
+               data->put_clock(ofdev);
        free_candev(dev);
 exit_dispose_irq:
        irq_dispose_mapping(irq);
index 567620d..198da64 100644 (file)
@@ -1157,11 +1157,13 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
 {
        struct rcar_canfd_channel *priv = gpriv->ch[ch];
        u32 ridx = ch + RCANFD_RFFIFO_IDX;
-       u32 sts;
+       u32 sts, cc;
 
        /* Handle Rx interrupts */
        sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
-       if (likely(sts & RCANFD_RFSTS_RFIF)) {
+       cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx));
+       if (likely(sts & RCANFD_RFSTS_RFIF &&
+                  cc & RCANFD_RFCC_RFIE)) {
                if (napi_schedule_prep(&priv->napi)) {
                        /* Disable Rx FIFO interrupts */
                        rcar_canfd_clear_bit(priv->base,
@@ -1244,11 +1246,9 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
 
 static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
 {
-       struct rcar_canfd_global *gpriv = dev_id;
-       u32 ch;
+       struct rcar_canfd_channel *priv = dev_id;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
-               rcar_canfd_handle_channel_tx(gpriv, ch);
+       rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel);
 
        return IRQ_HANDLED;
 }
@@ -1276,11 +1276,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c
 
 static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
 {
-       struct rcar_canfd_global *gpriv = dev_id;
-       u32 ch;
+       struct rcar_canfd_channel *priv = dev_id;
 
-       for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
-               rcar_canfd_handle_channel_err(gpriv, ch);
+       rcar_canfd_handle_channel_err(priv->gpriv, priv->channel);
 
        return IRQ_HANDLED;
 }
@@ -1721,6 +1719,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
        priv->ndev = ndev;
        priv->base = gpriv->base;
        priv->channel = ch;
+       priv->gpriv = gpriv;
        priv->can.clock.freq = fcan_freq;
        dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
 
@@ -1749,7 +1748,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
                }
                err = devm_request_irq(&pdev->dev, err_irq,
                                       rcar_canfd_channel_err_interrupt, 0,
-                                      irq_name, gpriv);
+                                      irq_name, priv);
                if (err) {
                        dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
                                err_irq, err);
@@ -1763,7 +1762,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
                }
                err = devm_request_irq(&pdev->dev, tx_irq,
                                       rcar_canfd_channel_tx_interrupt, 0,
-                                      irq_name, gpriv);
+                                      irq_name, priv);
                if (err) {
                        dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
                                tx_irq, err);
@@ -1789,7 +1788,6 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
 
        priv->can.do_set_mode = rcar_canfd_do_set_mode;
        priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
-       priv->gpriv = gpriv;
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
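
The conversion above passes the per-channel private data as the dev_id of each requested interrupt, so the handler services exactly the channel that fired instead of walking every channel in the global mask. A hedged sketch of that pattern (my_channel and the helpers are hypothetical):

static irqreturn_t my_ch_tx_interrupt(int irq, void *dev_id)
{
	struct my_channel *ch = dev_id;	/* per-channel state, set at request time */

	my_handle_channel_tx(ch->gpriv, ch->index);	/* hypothetical helper */
	return IRQ_HANDLED;
}

	/* in probe, once per channel: */
	err = devm_request_irq(&pdev->dev, tx_irq, my_ch_tx_interrupt, 0,
			       irq_name, ch);
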
index c320de4..24883a6 100644 (file)
@@ -1415,11 +1415,14 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
        ret = mcp251x_gpio_setup(priv);
        if (ret)
-               goto error_probe;
+               goto out_unregister_candev;
 
        netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
        return 0;
 
+out_unregister_candev:
+       unregister_candev(net);
+
 error_probe:
        destroy_workqueue(priv->wq);
        priv->wq = NULL;
index 7b52fda..66f672e 100644 (file)
@@ -1875,7 +1875,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
 {
        int err;
 
-       init_completion(&priv->start_comp);
+       reinit_completion(&priv->start_comp);
 
        err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
                                               priv->channel);
@@ -1893,7 +1893,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
 {
        int err;
 
-       init_completion(&priv->stop_comp);
+       reinit_completion(&priv->stop_comp);
 
        /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
         * see comment in kvaser_usb_hydra_update_state()
index 50f2ac8..1995803 100644 (file)
@@ -1320,7 +1320,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
 {
        int err;
 
-       init_completion(&priv->start_comp);
+       reinit_completion(&priv->start_comp);
 
        err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
                                              priv->channel);
@@ -1338,7 +1338,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
 {
        int err;
 
-       init_completion(&priv->stop_comp);
+       reinit_completion(&priv->stop_comp);
 
        err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
                                              priv->channel);
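
Both drivers above switch from init_completion() to reinit_completion() for the start/stop completions: the completion is initialised once when the interface is set up, and re-arming it for each command must not re-initialise the embedded wait-queue lock while another context may still be using it. A hedged sketch of the intended reuse pattern (my_cmd_ctx and the command plumbing are hypothetical):

struct my_cmd_ctx {
	struct completion done;
};

static void my_cmd_ctx_setup(struct my_cmd_ctx *ctx)
{
	init_completion(&ctx->done);		/* once, at allocation time */
}

static int my_send_cmd_and_wait(struct my_cmd_ctx *ctx)
{
	reinit_completion(&ctx->done);		/* re-arm; does not reset the lock */

	/* ... queue the command here; the completion handler calls
	 * complete(&ctx->done) when the device answers ... */

	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}
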
index 5669c92..c5c3b4e 100644 (file)
@@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
        struct qca8k_mgmt_eth_data *mgmt_eth_data;
        struct qca8k_priv *priv = ds->priv;
        struct qca_mgmt_ethhdr *mgmt_ethhdr;
+       u32 command;
        u8 len, cmd;
+       int i;
 
        mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
        mgmt_eth_data = &priv->mgmt_eth_data;
 
-       cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
-       len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+       command = get_unaligned_le32(&mgmt_ethhdr->command);
+       cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+       len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
 
        /* Make sure the seq match the requested packet */
-       if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+       if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
                mgmt_eth_data->ack = true;
 
        if (cmd == MDIO_READ) {
-               mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+               u32 *val = mgmt_eth_data->data;
+
+               *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
 
                /* Get the rest of the 12 byte of data.
                 * The read/write function will extract the requested data.
                 */
-               if (len > QCA_HDR_MGMT_DATA1_LEN)
-                       memcpy(mgmt_eth_data->data + 1, skb->data,
-                              QCA_HDR_MGMT_DATA2_LEN);
+               if (len > QCA_HDR_MGMT_DATA1_LEN) {
+                       __le32 *data2 = (__le32 *)skb->data;
+                       int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+                                            len - QCA_HDR_MGMT_DATA1_LEN);
+
+                       val++;
+
+                       for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+                               *val = get_unaligned_le32(data2);
+                               val++;
+                               data2++;
+                       }
+               }
        }
 
        complete(&mgmt_eth_data->rw_done);
@@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
        struct qca_mgmt_ethhdr *mgmt_ethhdr;
        unsigned int real_len;
        struct sk_buff *skb;
-       u32 *data2;
+       __le32 *data2;
+       u32 command;
        u16 hdr;
+       int i;
 
        skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
        if (!skb)
@@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
        hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
        hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
 
-       mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
-       mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+       command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+       command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+       command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+       command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
                                           QCA_HDR_MGMT_CHECK_CODE_VAL);
 
+       put_unaligned_le32(command, &mgmt_ethhdr->command);
+
        if (cmd == MDIO_WRITE)
-               mgmt_ethhdr->mdio_data = *val;
+               put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
 
        mgmt_ethhdr->hdr = htons(hdr);
 
        data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
-       if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
-               memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+       if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+               int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+                                    len - QCA_HDR_MGMT_DATA1_LEN);
+
+               val++;
+
+               for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+                       put_unaligned_le32(*val, data2);
+                       data2++;
+                       val++;
+               }
+       }
 
        return skb;
 }
@@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
 {
        struct qca_mgmt_ethhdr *mgmt_ethhdr;
+       u32 seq;
 
+       seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
        mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
-       mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+       put_unaligned_le32(seq, &mgmt_ethhdr->seq);
 }
 
 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
@@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
        struct qca8k_priv *priv = ds->priv;
        const struct qca8k_mib_desc *mib;
        struct mib_ethhdr *mib_ethhdr;
-       int i, mib_len, offset = 0;
-       u64 *data;
+       __le32 *data2;
        u8 port;
+       int i;
 
        mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
        mib_eth_data = &priv->mib_eth_data;
@@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
        if (port != mib_eth_data->req_port)
                goto exit;
 
-       data = mib_eth_data->data;
+       data2 = (__le32 *)skb->data;
 
        for (i = 0; i < priv->info->mib_count; i++) {
                mib = &ar8327_mib[i];
 
                /* First 3 mib are present in the skb head */
                if (i < 3) {
-                       data[i] = mib_ethhdr->data[i];
+                       mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
                        continue;
                }
 
-               mib_len = sizeof(uint32_t);
-
                /* Some mib are 64 bit wide */
                if (mib->size == 2)
-                       mib_len = sizeof(uint64_t);
-
-               /* Copy the mib value from packet to the */
-               memcpy(data + i, skb->data + offset, mib_len);
+                       mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+               else
+                       mib_eth_data->data[i] = get_unaligned_le32(data2);
 
-               /* Set the offset for the next mib */
-               offset += mib_len;
+               data2 += mib->size;
        }
 
 exit:
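
The qca8k changes above replace direct loads and stores of the management header words with get_unaligned_le32()/put_unaligned_le32(), since the header sits at an arbitrary offset inside the skb and its fields are little-endian on the wire. A self-contained userspace equivalent of what those helpers do (byte-wise assembly, legal at any alignment and independent of host endianness):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static void store_le32(void *p, uint32_t v)
{
	uint8_t b[4] = {
		(uint8_t)v, (uint8_t)(v >> 8),
		(uint8_t)(v >> 16), (uint8_t)(v >> 24),
	};

	memcpy(p, b, sizeof(b));
}

int main(void)
{
	uint8_t frame[8] = { 0 };

	store_le32(frame + 3, 0x12345678);		/* deliberately unaligned */
	printf("0x%08x\n", load_le32(frame + 3));	/* 0x12345678 on any host */
	return 0;
}
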
index 2af3da4..f409d7b 100644 (file)
@@ -285,6 +285,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
                /* Yellow Carp devices do not need cdr workaround */
                pdata->vdata->an_cdr_workaround = 0;
+
+               /* Yellow Carp devices do not need rrc */
+               pdata->vdata->enable_rrc = 0;
        } else {
                pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
                pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -483,6 +486,7 @@ static struct xgbe_version_data xgbe_v2a = {
        .tx_desc_prefetch               = 5,
        .rx_desc_prefetch               = 5,
        .an_cdr_workaround              = 1,
+       .enable_rrc                     = 1,
 };
 
 static struct xgbe_version_data xgbe_v2b = {
@@ -498,6 +502,7 @@ static struct xgbe_version_data xgbe_v2b = {
        .tx_desc_prefetch               = 5,
        .rx_desc_prefetch               = 5,
        .an_cdr_workaround              = 1,
+       .enable_rrc                     = 1,
 };
 
 static const struct pci_device_id xgbe_pci_table[] = {
index 2156600..4064c3e 100644 (file)
@@ -239,6 +239,7 @@ enum xgbe_sfp_speed {
 #define XGBE_SFP_BASE_BR_1GBE_MAX              0x0d
 #define XGBE_SFP_BASE_BR_10GBE_MIN             0x64
 #define XGBE_SFP_BASE_BR_10GBE_MAX             0x68
+#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX       0x78
 
 #define XGBE_SFP_BASE_CU_CABLE_LEN             18
 
@@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom {
 #define XGBE_BEL_FUSE_VENDOR   "BEL-FUSE        "
 #define XGBE_BEL_FUSE_PARTNO   "1GBT-SFP06      "
 
+#define XGBE_MOLEX_VENDOR      "Molex Inc.      "
+
 struct xgbe_sfp_ascii {
        union {
                char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
                break;
        case XGBE_SFP_SPEED_10000:
                min = XGBE_SFP_BASE_BR_10GBE_MIN;
-               max = XGBE_SFP_BASE_BR_10GBE_MAX;
+               if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+                          XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+                       max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+               else
+                       max = XGBE_SFP_BASE_BR_10GBE_MAX;
                break;
        default:
                return false;
@@ -1151,7 +1158,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
        }
 
        /* Determine the type of SFP */
-       if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+       if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
+           xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+               phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+       else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
                phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
        else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
                phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1177,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
                phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
        else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
                phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
-       else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
-                xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
-               phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
 
        switch (phy_data->sfp_base) {
        case XGBE_SFP_BASE_1000_T:
@@ -1979,6 +1986,10 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
 
 static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
 {
+       /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */
+       if (pdata->phy.autoneg != AUTONEG_DISABLE)
+               return;
+
        XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
                         XGBE_PMA_PLL_CTRL_MASK,
                         enable ? XGBE_PMA_PLL_CTRL_ENABLE
@@ -1989,7 +2000,7 @@ static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
 }
 
 static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
-                                       unsigned int cmd, unsigned int sub_cmd)
+                                       enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd)
 {
        unsigned int s0 = 0;
        unsigned int wait;
@@ -2029,14 +2040,16 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
        xgbe_phy_rx_reset(pdata);
 
 reenable_pll:
-       /* Enable PLL re-initialization */
-       xgbe_phy_pll_ctrl(pdata, true);
+       /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
+       if (cmd != XGBE_MB_CMD_POWER_OFF &&
+           cmd != XGBE_MB_CMD_RRC)
+               xgbe_phy_pll_ctrl(pdata, true);
 }
 
 static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
 {
        /* Receiver Reset Cycle */
-       xgbe_phy_perform_ratechange(pdata, 5, 0);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE);
 
        netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
 }
@@ -2046,7 +2059,7 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
        struct xgbe_phy_data *phy_data = pdata->phy_data;
 
        /* Power off */
-       xgbe_phy_perform_ratechange(pdata, 0, 0);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE);
 
        phy_data->cur_mode = XGBE_MODE_UNKNOWN;
 
@@ -2061,14 +2074,17 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
 
        /* 10G/SFI */
        if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
-               xgbe_phy_perform_ratechange(pdata, 3, 0);
+               xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
        } else {
                if (phy_data->sfp_cable_len <= 1)
-                       xgbe_phy_perform_ratechange(pdata, 3, 1);
+                       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+                                                   XGBE_MB_SUBCMD_PASSIVE_1M);
                else if (phy_data->sfp_cable_len <= 3)
-                       xgbe_phy_perform_ratechange(pdata, 3, 2);
+                       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+                                                   XGBE_MB_SUBCMD_PASSIVE_3M);
                else
-                       xgbe_phy_perform_ratechange(pdata, 3, 3);
+                       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+                                                   XGBE_MB_SUBCMD_PASSIVE_OTHER);
        }
 
        phy_data->cur_mode = XGBE_MODE_SFI;
@@ -2083,7 +2099,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 1G/X */
-       xgbe_phy_perform_ratechange(pdata, 1, 3);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
 
        phy_data->cur_mode = XGBE_MODE_X;
 
@@ -2097,7 +2113,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 1G/SGMII */
-       xgbe_phy_perform_ratechange(pdata, 1, 2);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII);
 
        phy_data->cur_mode = XGBE_MODE_SGMII_1000;
 
@@ -2111,7 +2127,7 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 100M/SGMII */
-       xgbe_phy_perform_ratechange(pdata, 1, 1);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS);
 
        phy_data->cur_mode = XGBE_MODE_SGMII_100;
 
@@ -2125,7 +2141,7 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 10G/KR */
-       xgbe_phy_perform_ratechange(pdata, 4, 0);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
 
        phy_data->cur_mode = XGBE_MODE_KR;
 
@@ -2139,7 +2155,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 2.5G/KX */
-       xgbe_phy_perform_ratechange(pdata, 2, 0);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE);
 
        phy_data->cur_mode = XGBE_MODE_KX_2500;
 
@@ -2153,7 +2169,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
        xgbe_phy_set_redrv_mode(pdata);
 
        /* 1G/KX */
-       xgbe_phy_perform_ratechange(pdata, 1, 3);
+       xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
 
        phy_data->cur_mode = XGBE_MODE_KX_1000;
 
@@ -2640,7 +2656,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
        }
 
        /* No link, attempt a receiver reset cycle */
-       if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+       if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
                phy_data->rrc_count = 0;
                xgbe_phy_rrc(pdata);
        }
index b875c43..71f24cb 100644 (file)
@@ -611,6 +611,31 @@ enum xgbe_mdio_mode {
        XGBE_MDIO_MODE_CL45,
 };
 
+enum xgbe_mb_cmd {
+       XGBE_MB_CMD_POWER_OFF = 0,
+       XGBE_MB_CMD_SET_1G,
+       XGBE_MB_CMD_SET_2_5G,
+       XGBE_MB_CMD_SET_10G_SFI,
+       XGBE_MB_CMD_SET_10G_KR,
+       XGBE_MB_CMD_RRC
+};
+
+enum xgbe_mb_subcmd {
+       XGBE_MB_SUBCMD_NONE = 0,
+
+       /* 10GbE SFP subcommands */
+       XGBE_MB_SUBCMD_ACTIVE = 0,
+       XGBE_MB_SUBCMD_PASSIVE_1M,
+       XGBE_MB_SUBCMD_PASSIVE_3M,
+       XGBE_MB_SUBCMD_PASSIVE_OTHER,
+
+       /* 1GbE Mode subcommands */
+       XGBE_MB_SUBCMD_10MBITS = 0,
+       XGBE_MB_SUBCMD_100MBITS,
+       XGBE_MB_SUBCMD_1G_SGMII,
+       XGBE_MB_SUBCMD_1G_KX
+};
+
 struct xgbe_phy {
        struct ethtool_link_ksettings lks;
 
@@ -1013,6 +1038,7 @@ struct xgbe_version_data {
        unsigned int tx_desc_prefetch;
        unsigned int rx_desc_prefetch;
        unsigned int an_cdr_workaround;
+       unsigned int enable_rrc;
 };
 
 struct xgbe_prv_data {
index 3d0e167..a018081 100644 (file)
@@ -1394,26 +1394,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
                        egress_sa_threshold_expired);
 }
 
+#define AQ_LOCKED_MDO_DEF(mdo)                                         \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx)             \
+{                                                                      \
+       struct aq_nic_s *nic = netdev_priv(ctx->netdev);                \
+       int ret;                                                        \
+       mutex_lock(&nic->macsec_mutex);                                 \
+       ret = aq_mdo_##mdo(ctx);                                        \
+       mutex_unlock(&nic->macsec_mutex);                               \
+       return ret;                                                     \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
 const struct macsec_ops aq_macsec_ops = {
-       .mdo_dev_open = aq_mdo_dev_open,
-       .mdo_dev_stop = aq_mdo_dev_stop,
-       .mdo_add_secy = aq_mdo_add_secy,
-       .mdo_upd_secy = aq_mdo_upd_secy,
-       .mdo_del_secy = aq_mdo_del_secy,
-       .mdo_add_rxsc = aq_mdo_add_rxsc,
-       .mdo_upd_rxsc = aq_mdo_upd_rxsc,
-       .mdo_del_rxsc = aq_mdo_del_rxsc,
-       .mdo_add_rxsa = aq_mdo_add_rxsa,
-       .mdo_upd_rxsa = aq_mdo_upd_rxsa,
-       .mdo_del_rxsa = aq_mdo_del_rxsa,
-       .mdo_add_txsa = aq_mdo_add_txsa,
-       .mdo_upd_txsa = aq_mdo_upd_txsa,
-       .mdo_del_txsa = aq_mdo_del_txsa,
-       .mdo_get_dev_stats = aq_mdo_get_dev_stats,
-       .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
-       .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
-       .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
-       .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+       .mdo_dev_open = aq_locked_mdo_dev_open,
+       .mdo_dev_stop = aq_locked_mdo_dev_stop,
+       .mdo_add_secy = aq_locked_mdo_add_secy,
+       .mdo_upd_secy = aq_locked_mdo_upd_secy,
+       .mdo_del_secy = aq_locked_mdo_del_secy,
+       .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+       .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+       .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+       .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+       .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+       .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+       .mdo_add_txsa = aq_locked_mdo_add_txsa,
+       .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+       .mdo_del_txsa = aq_locked_mdo_del_txsa,
+       .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+       .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+       .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+       .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+       .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
 };
 
 int aq_macsec_init(struct aq_nic_s *nic)
@@ -1435,6 +1466,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
 
        nic->ndev->features |= NETIF_F_HW_MACSEC;
        nic->ndev->macsec_ops = &aq_macsec_ops;
+       mutex_init(&nic->macsec_mutex);
 
        return 0;
 }
@@ -1458,7 +1490,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
        if (!nic->macsec_cfg)
                return 0;
 
-       rtnl_lock();
+       mutex_lock(&nic->macsec_mutex);
 
        if (nic->aq_fw_ops->send_macsec_req) {
                struct macsec_cfg_request cfg = { 0 };
@@ -1507,7 +1539,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
        ret = aq_apply_macsec_cfg(nic);
 
 unlock:
-       rtnl_unlock();
+       mutex_unlock(&nic->macsec_mutex);
        return ret;
 }
 
@@ -1519,9 +1551,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
        if (!netif_carrier_ok(nic->ndev))
                return;
 
-       rtnl_lock();
+       mutex_lock(&nic->macsec_mutex);
        aq_check_txsa_expiration(nic);
-       rtnl_unlock();
+       mutex_unlock(&nic->macsec_mutex);
 }
 
 int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1532,21 +1564,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
        if (!cfg)
                return 0;
 
+       mutex_lock(&nic->macsec_mutex);
+
        for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
                if (!test_bit(i, &cfg->rxsc_idx_busy))
                        continue;
                cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
        }
 
+       mutex_unlock(&nic->macsec_mutex);
        return cnt;
 }
 
 int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
 {
+       int cnt;
+
        if (!nic->macsec_cfg)
                return 0;
 
-       return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+       mutex_lock(&nic->macsec_mutex);
+       cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+       mutex_unlock(&nic->macsec_mutex);
+
+       return cnt;
 }
 
 int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1557,12 +1598,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
        if (!cfg)
                return 0;
 
+       mutex_lock(&nic->macsec_mutex);
+
        for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
                if (!test_bit(i, &cfg->txsc_idx_busy))
                        continue;
                cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
        }
 
+       mutex_unlock(&nic->macsec_mutex);
        return cnt;
 }
 
@@ -1634,6 +1678,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
        if (!cfg)
                return data;
 
+       mutex_lock(&nic->macsec_mutex);
+
        aq_macsec_update_stats(nic);
 
        common_stats = &cfg->stats;
@@ -1716,5 +1762,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
 
        data += i;
 
+       mutex_unlock(&nic->macsec_mutex);
+
        return data;
 }
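
The atlantic hunks above serialize every MACsec callback behind a driver-private mutex (instead of assuming the RTNL lock) by generating one locked wrapper per handler from a single macro. A minimal userspace sketch of that wrapper-generation idea, with pthreads standing in for the kernel mutex API and all names invented for illustration:

#include <pthread.h>
#include <stdio.h>

/* Toy "device" whose configuration must only be touched under its own lock. */
struct toy_nic {
	pthread_mutex_t cfg_lock;
	int sa_count;
};

/* Unlocked primitive operations (stand-ins for the aq_mdo_* handlers). */
static int toy_add_sa(struct toy_nic *nic) { nic->sa_count++; return 0; }
static int toy_del_sa(struct toy_nic *nic) { nic->sa_count--; return 0; }

/* Generate a locked wrapper per operation, mirroring the AQ_LOCKED_MDO_DEF shape. */
#define TOY_LOCKED_OP(op)                                        \
static int toy_locked_##op(struct toy_nic *nic)                  \
{                                                                \
	int ret;                                                 \
	pthread_mutex_lock(&nic->cfg_lock);                      \
	ret = toy_##op(nic);                                     \
	pthread_mutex_unlock(&nic->cfg_lock);                    \
	return ret;                                              \
}

TOY_LOCKED_OP(add_sa)
TOY_LOCKED_OP(del_sa)

int main(void)
{
	struct toy_nic nic = { .cfg_lock = PTHREAD_MUTEX_INITIALIZER };

	toy_locked_add_sa(&nic);
	toy_locked_del_sa(&nic);
	printf("sa_count=%d\n", nic.sa_count);
	return 0;
}

The macro keeps the lock/unlock boilerplate in one place, so the callback table only ever references the wrapped entry points.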
index 935ba88..ad33f85 100644 (file)
@@ -157,6 +157,8 @@ struct aq_nic_s {
        struct mutex fwreq_mutex;
 #if IS_ENABLED(CONFIG_MACSEC)
        struct aq_macsec_cfg *macsec_cfg;
+       /* mutex to protect data in macsec_cfg */
+       struct mutex macsec_mutex;
 #endif
        /* PTP support */
        struct aq_ptp_s *aq_ptp;
index 93ccf54..a737b19 100644 (file)
@@ -561,8 +561,6 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_devic
 
        if (++ring->write_idx == ring->length - 1)
                ring->write_idx = 0;
-       enet->netdev->stats.tx_bytes += skb->len;
-       enet->netdev->stats.tx_packets++;
 
        return NETDEV_TX_OK;
 }
@@ -635,6 +633,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
        struct bcm4908_enet_dma_ring_bd *buf_desc;
        struct bcm4908_enet_dma_ring_slot *slot;
        struct device *dev = enet->dev;
+       unsigned int bytes = 0;
        int handled = 0;
 
        while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -645,12 +644,17 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
 
                dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
                dev_kfree_skb(slot->skb);
-               if (++tx_ring->read_idx == tx_ring->length)
-                       tx_ring->read_idx = 0;
 
                handled++;
+               bytes += slot->len;
+
+               if (++tx_ring->read_idx == tx_ring->length)
+                       tx_ring->read_idx = 0;
        }
 
+       enet->netdev->stats.tx_packets += handled;
+       enet->netdev->stats.tx_bytes += bytes;
+
        if (handled < weight) {
                napi_complete_done(napi, handled);
                bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
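
The bcm4908 hunks move tx_packets/tx_bytes accounting from submit time into the completion (reclaim) loop, so only frames the hardware has actually finished are counted. A hedged userspace sketch of that reclaim-side accounting over a ring (struct and field names are invented):

#include <stdio.h>

#define RING_LEN 8

struct slot { unsigned int len; };

struct ring {
	struct slot slots[RING_LEN];
	unsigned int read_idx, write_idx;
	unsigned long tx_packets, tx_bytes;
};

/* Reclaim completed slots and account stats only here, not at enqueue time. */
static int reclaim(struct ring *r, int budget)
{
	unsigned int bytes = 0;
	int handled = 0;

	while (handled < budget && r->read_idx != r->write_idx) {
		struct slot *s = &r->slots[r->read_idx];

		handled++;
		bytes += s->len;

		if (++r->read_idx == RING_LEN)
			r->read_idx = 0;
	}

	r->tx_packets += handled;
	r->tx_bytes += bytes;
	return handled;
}

int main(void)
{
	struct ring r = { .write_idx = 3 };

	r.slots[0].len = 64; r.slots[1].len = 128; r.slots[2].len = 256;
	reclaim(&r, 16);
	printf("packets=%lu bytes=%lu\n", r.tx_packets, r.tx_bytes);
	return 0;
}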
index 867f14c..425d6cc 100644 (file)
@@ -1991,6 +1991,9 @@ static int bcm_sysport_open(struct net_device *dev)
                goto out_clk_disable;
        }
 
+       /* Indicate that the MAC is responsible for PHY PM */
+       phydev->mac_managed_pm = true;
+
        /* Reset house keeping link status */
        priv->old_duplex = -1;
        priv->old_link = -1;
index a36803e..8a6f788 100644 (file)
@@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
 
 static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
 {
+       bool rc = false;
        u32 datalen;
        u16 index;
        u8 *buf;
@@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
 
        if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
                NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
-               goto err;
+               goto done;
        }
 
        if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
                             BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
                NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
-               goto err;
+               goto done;
        }
 
-       return true;
+       rc = true;
 
-err:
+done:
        kfree(buf);
-       return false;
+       return rc;
 }
 
 static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
index 51c9fd6..4f63f1b 100644 (file)
@@ -806,6 +806,7 @@ static int macb_mii_probe(struct net_device *dev)
 
        bp->phylink_config.dev = &dev->dev;
        bp->phylink_config.type = PHYLINK_NETDEV;
+       bp->phylink_config.mac_managed_pm = true;
 
        if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
                bp->phylink_config.poll_fixed_state = true;
index 31cfa12..fc68a32 100644 (file)
@@ -221,8 +221,8 @@ static int dpaa_netdev_init(struct net_device *net_dev,
        net_dev->netdev_ops = dpaa_ops;
        mac_addr = mac_dev->addr;
 
-       net_dev->mem_start = (unsigned long)mac_dev->vaddr;
-       net_dev->mem_end = (unsigned long)mac_dev->vaddr_end;
+       net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
+       net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
 
        net_dev->min_mtu = ETH_MIN_MTU;
        net_dev->max_mtu = dpaa_get_max_mtu();
index 258eb6c..4fee74c 100644 (file)
@@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev,
 
        if (mac_dev)
                return sprintf(buf, "%llx",
-                               (unsigned long long)mac_dev->vaddr);
+                               (unsigned long long)mac_dev->res->start);
        else
                return sprintf(buf, "none");
 }
index 54bc92f..f8c06c3 100644 (file)
@@ -2090,7 +2090,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
        else
                enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
 
+       /* Also prepare the consumer index in case page allocation never
+        * succeeds. In that case, hardware will never advance producer index
+        * to match consumer index, and will drop all frames.
+        */
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+       enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
 
        /* enable Rx ints by setting pkt thr to 1 */
        enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
index 98d5cd3..28ef4d3 100644 (file)
@@ -2432,6 +2432,31 @@ static u32 fec_enet_register_offset[] = {
        IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
        IEEE_R_FDXFC, IEEE_R_OCTETS_OK
 };
+/* for i.MX6ul */
+static u32 fec_enet_register_offset_6ul[] = {
+       FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+       FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+       FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+       FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+       FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+       FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+       FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+       RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+       RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+       RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+       RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+       RMON_T_P_GTE2048, RMON_T_OCTETS,
+       IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+       IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+       IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+       RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+       RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+       RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+       RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+       RMON_R_P_GTE2048, RMON_R_OCTETS,
+       IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+       IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
 #else
 static __u32 fec_enet_register_version = 1;
 static u32 fec_enet_register_offset[] = {
@@ -2456,7 +2481,24 @@ static void fec_enet_get_regs(struct net_device *ndev,
        u32 *buf = (u32 *)regbuf;
        u32 i, off;
        int ret;
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+       defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+       defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+       u32 *reg_list;
+       u32 reg_cnt;
 
+       if (!of_machine_is_compatible("fsl,imx6ul")) {
+               reg_list = fec_enet_register_offset;
+               reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+       } else {
+               reg_list = fec_enet_register_offset_6ul;
+               reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+       }
+#else
+       /* coldfire */
+       static u32 *reg_list = fec_enet_register_offset;
+       static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+#endif
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return;
@@ -2465,8 +2507,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
 
        memset(buf, 0, regs->len);
 
-       for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
-               off = fec_enet_register_offset[i];
+       for (i = 0; i < reg_cnt; i++) {
+               off = reg_list[i];
 
                if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
                    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
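
The fec hunk picks a different register-dump table on i.MX6UL at runtime instead of always walking the full list. A small sketch of that pattern — static data tables selected by a runtime platform check — where is_imx6ul() is only a stand-in for of_machine_is_compatible():

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int regs_full[] = { 0x004, 0x008, 0x010, 0x014, 0x084 };
static const unsigned int regs_6ul[]  = { 0x004, 0x008, 0x010, 0x014 };

/* Stand-in for of_machine_is_compatible("fsl,imx6ul"). */
static int is_imx6ul(const char *machine)
{
	return strcmp(machine, "fsl,imx6ul") == 0;
}

static void dump_regs(const char *machine)
{
	const unsigned int *list;
	size_t cnt, i;

	/* Choose the register list once, then walk it generically. */
	if (is_imx6ul(machine)) {
		list = regs_6ul;
		cnt = ARRAY_SIZE(regs_6ul);
	} else {
		list = regs_full;
		cnt = ARRAY_SIZE(regs_full);
	}

	for (i = 0; i < cnt; i++)
		printf("%s: reg offset 0x%03x\n", machine, list[i]);
}

int main(void)
{
	dump_regs("fsl,imx6ul");
	dump_regs("fsl,imx6q");
	return 0;
}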
index 7b7526f..65df308 100644 (file)
@@ -279,7 +279,6 @@ static int mac_probe(struct platform_device *_of_dev)
        struct device_node      *mac_node, *dev_node;
        struct mac_device       *mac_dev;
        struct platform_device  *of_dev;
-       struct resource         *res;
        struct mac_priv_s       *priv;
        struct fman_mac_params   params;
        u32                      val;
@@ -338,24 +337,25 @@ static int mac_probe(struct platform_device *_of_dev)
        of_node_put(dev_node);
 
        /* Get the address of the memory mapped registers */
-       res = platform_get_mem_or_io(_of_dev, 0);
-       if (!res) {
+       mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+       if (!mac_dev->res) {
                dev_err(dev, "could not get registers\n");
                return -EINVAL;
        }
 
-       err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res);
+       err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+                                   mac_dev->res);
        if (err) {
                dev_err_probe(dev, err, "could not request resource\n");
                return err;
        }
 
-       mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res));
+       mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+                                     resource_size(mac_dev->res));
        if (!mac_dev->vaddr) {
                dev_err(dev, "devm_ioremap() failed\n");
                return -EIO;
        }
-       mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res);
 
        if (!of_device_is_available(mac_node))
                return -ENODEV;
index b95d384..13b69ca 100644 (file)
@@ -20,8 +20,8 @@ struct mac_priv_s;
 
 struct mac_device {
        void __iomem            *vaddr;
-       void __iomem            *vaddr_end;
        struct device           *dev;
+       struct resource         *res;
        u8                       addr[ETH_ALEN];
        struct fman_port        *port[2];
        u32                      if_support;
index 00fafc0..430ecce 100644 (file)
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
-       if (ret)
+       if (ret) {
+               put_device(&hdev->cls_dev);
                return ret;
+       }
 
        __module_get(THIS_MODULE);
 
index 19eb839..061952c 100644 (file)
@@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
        struct tag_sml_funcfg_tbl *funcfg_table_elem;
        struct hinic_cmd_lt_rd *read_data;
        u16 out_size = sizeof(*read_data);
+       int ret = ~0;
        int err;
 
        read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
@@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
 
        switch (idx) {
        case VALID:
-               return funcfg_table_elem->dw0.bs.valid;
+               ret = funcfg_table_elem->dw0.bs.valid;
+               break;
        case RX_MODE:
-               return funcfg_table_elem->dw0.bs.nic_rx_mode;
+               ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+               break;
        case MTU:
-               return funcfg_table_elem->dw1.bs.mtu;
+               ret = funcfg_table_elem->dw1.bs.mtu;
+               break;
        case RQ_DEPTH:
-               return funcfg_table_elem->dw13.bs.cfg_rq_depth;
+               ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+               break;
        case QUEUE_NUM:
-               return funcfg_table_elem->dw13.bs.cfg_q_num;
+               ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+               break;
        }
 
        kfree(read_data);
 
-       return ~0;
+       return ret;
 }
 
 static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
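
The hinic hunk above replaces early returns from the switch (which leaked read_data) with a single kfree()-and-return exit. A minimal sketch of that single-exit shape in plain C, with calloc/free standing in for kzalloc/kfree and invented field names:

#include <stdio.h>
#include <stdlib.h>

enum field { VALID, MTU, QUEUE_NUM };

struct table_entry { int valid; int mtu; int q_num; };

/* Every case breaks to one cleanup point, so the buffer is always freed. */
static int read_field(enum field idx)
{
	struct table_entry *buf;
	int ret = -1;

	buf = calloc(1, sizeof(*buf));
	if (!buf)
		return -1;

	buf->valid = 1;
	buf->mtu = 1500;
	buf->q_num = 4;

	switch (idx) {
	case VALID:
		ret = buf->valid;
		break;
	case MTU:
		ret = buf->mtu;
		break;
	case QUEUE_NUM:
		ret = buf->q_num;
		break;
	}

	free(buf);
	return ret;
}

int main(void)
{
	printf("mtu=%d\n", read_field(MTU));
	return 0;
}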
index 78190e8..d39eec9 100644 (file)
@@ -924,7 +924,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
 
 err_set_cmdq_depth:
        hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
+       free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
 err_cmdq_ctxt:
        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
                            HINIC_MAX_CMDQ_TYPES);
index 94f4705..2779528 100644 (file)
@@ -877,7 +877,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
        if (err)
                return -EINVAL;
 
-       interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
+       interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
        interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
 
        err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
index a5f08b9..f7e05b4 100644 (file)
@@ -1174,7 +1174,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev)
                        dev_err(&hwdev->hwif->pdev->dev,
                                "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
                                err, register_info.status, out_size);
-                       hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
                        return -EIO;
                }
        } else {
index 294bdbb..b4aff59 100644 (file)
@@ -2900,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
        ret = of_device_register(&port->ofdev);
        if (ret) {
                pr_err("failed to register device. ret=%d\n", ret);
+               put_device(&port->ofdev.dev);
                goto out;
        }
 
index 7e75706..4a6a6e4 100644 (file)
@@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err)
                                goto rx_unwind;
-                       err = i40e_alloc_rx_bi(&rx_rings[i]);
-                       if (err)
-                               goto rx_unwind;
 
                        /* now allocate the Rx buffers to make sure the OS
                         * has enough memory, any failure here means abort
@@ -3188,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
 
                if (cmd->flow_type == TCP_V4_FLOW ||
                    cmd->flow_type == UDP_V4_FLOW) {
-                       if (i_set & I40E_L3_SRC_MASK)
-                               cmd->data |= RXH_IP_SRC;
-                       if (i_set & I40E_L3_DST_MASK)
-                               cmd->data |= RXH_IP_DST;
+                       if (hw->mac.type == I40E_MAC_X722) {
+                               if (i_set & I40E_X722_L3_SRC_MASK)
+                                       cmd->data |= RXH_IP_SRC;
+                               if (i_set & I40E_X722_L3_DST_MASK)
+                                       cmd->data |= RXH_IP_DST;
+                       } else {
+                               if (i_set & I40E_L3_SRC_MASK)
+                                       cmd->data |= RXH_IP_SRC;
+                               if (i_set & I40E_L3_DST_MASK)
+                                       cmd->data |= RXH_IP_DST;
+                       }
                } else if (cmd->flow_type == TCP_V6_FLOW ||
                          cmd->flow_type == UDP_V6_FLOW) {
                        if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3549,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 
 /**
  * i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
  * @nfc: pointer to user request
  * @i_setc: bits currently set
  *
  * Returns value of bits to be set per user request
  **/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+                                 struct ethtool_rxnfc *nfc,
+                                 u64 i_setc)
 {
        u64 i_set = i_setc;
        u64 src_l3 = 0, dst_l3 = 0;
@@ -3573,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
                dst_l3 = I40E_L3_V6_DST_MASK;
        } else if (nfc->flow_type == TCP_V4_FLOW ||
                  nfc->flow_type == UDP_V4_FLOW) {
-               src_l3 = I40E_L3_SRC_MASK;
-               dst_l3 = I40E_L3_DST_MASK;
+               if (hw->mac.type == I40E_MAC_X722) {
+                       src_l3 = I40E_X722_L3_SRC_MASK;
+                       dst_l3 = I40E_X722_L3_DST_MASK;
+               } else {
+                       src_l3 = I40E_L3_SRC_MASK;
+                       dst_l3 = I40E_L3_DST_MASK;
+               }
        } else {
                /* Any other flow type are not supported here */
                return i_set;
@@ -3592,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
        return i_set;
 }
 
+#define FLOW_PCTYPES_SIZE 64
 /**
  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
  * @pf: pointer to the physical function struct
@@ -3604,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        struct i40e_hw *hw = &pf->hw;
        u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
                   ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
-       u8 flow_pctype = 0;
+       DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
        u64 i_set, i_setc;
 
+       bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
                dev_err(&pf->pdev->dev,
                        "Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3622,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
-               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
                if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
-                       hena |=
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+                       set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+                               flow_pctypes);
                break;
        case TCP_V6_FLOW:
-               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
-               if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
-                       hena |=
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+               set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
                if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
-                       hena |=
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+                       set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+                               flow_pctypes);
                break;
        case UDP_V4_FLOW:
-               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
-               if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
-                       hena |=
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+               set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+               if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+                       set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+                               flow_pctypes);
+                       set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+                               flow_pctypes);
+               }
                hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
                break;
        case UDP_V6_FLOW:
-               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
-               if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
-                       hena |=
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+               set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+               if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+                       set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+                               flow_pctypes);
+                       set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+                               flow_pctypes);
+               }
                hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
                break;
        case AH_ESP_V4_FLOW:
@@ -3684,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                return -EINVAL;
        }
 
-       if (flow_pctype) {
-               i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
-                                              flow_pctype)) |
-                       ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
-                                              flow_pctype)) << 32);
-               i_set = i40e_get_rss_hash_bits(nfc, i_setc);
-               i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
-                                 (u32)i_set);
-               i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
-                                 (u32)(i_set >> 32));
-               hena |= BIT_ULL(flow_pctype);
+       if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+               u8 flow_id;
+
+               for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+                       i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+                                ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+                       i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+                       i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+                                         (u32)i_set);
+                       i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+                                         (u32)(i_set >> 32));
+                       hena |= BIT_ULL(flow_id);
+               }
        }
 
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
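
The RSS hunk collects every affected packet-classifier type in a bitmap and then programs the hash input set once per set bit, rather than tracking a single flow_pctype. A userspace sketch of the same collect-then-iterate idea, using a plain unsigned long as the bitmap where the driver uses DECLARE_BITMAP()/for_each_set_bit(); the mask value is illustrative only:

#include <stdio.h>

enum pctype { PCTYPE_IPV4_TCP, PCTYPE_IPV4_TCP_SYN_NO_ACK, PCTYPE_IPV4_UDP, PCTYPE_MAX };

static void program_pctype(int id, unsigned long long input_set)
{
	/* Stand-in for the paired register writes done per packet type. */
	printf("pctype %d <- input set 0x%llx\n", id, input_set);
}

int main(void)
{
	unsigned long pctypes = 0;      /* bitmap of flow types to touch */
	unsigned long long hena = 0;    /* enabled-hash bitmask, built alongside */
	int supports_extra_tcp = 1;     /* models the extra-TCP/UDP pctype capability */
	int id;

	/* Collect first... */
	pctypes |= 1UL << PCTYPE_IPV4_TCP;
	if (supports_extra_tcp)
		pctypes |= 1UL << PCTYPE_IPV4_TCP_SYN_NO_ACK;

	/* ...then apply the same configuration to every collected type. */
	for (id = 0; id < PCTYPE_MAX; id++) {
		if (!(pctypes & (1UL << id)))
			continue;
		program_pctype(id, 0x3ULL << 47);
		hena |= 1ULL << id;
	}

	printf("hena=0x%llx\n", hena);
	return 0;
}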
index 2c07fa8..b5dcd15 100644 (file)
@@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        if (ring->vsi->type == I40E_VSI_MAIN)
                xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
-       kfree(ring->rx_bi);
        ring->xsk_pool = i40e_xsk_pool(ring);
        if (ring->xsk_pool) {
-               ret = i40e_alloc_rx_bi_zc(ring);
-               if (ret)
-                       return ret;
                ring->rx_buf_len =
                  xsk_pool_get_rx_frame_size(ring->xsk_pool);
                /* For AF_XDP ZC, we disallow packets to span on
@@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
                         ring->queue_index);
 
        } else {
-               ret = i40e_alloc_rx_bi(ring);
-               if (ret)
-                       return ret;
                ring->rx_buf_len = vsi->rx_buf_len;
                if (ring->vsi->type == I40E_VSI_MAIN) {
                        ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
                i40e_reset_and_rebuild(pf, true, true);
        }
 
+       if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+               if (i40e_realloc_rx_bi_zc(vsi, true))
+                       return -ENOMEM;
+       } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+               if (i40e_realloc_rx_bi_zc(vsi, false))
+                       return -ENOMEM;
+       }
+
        for (i = 0; i < vsi->num_queue_pairs; i++)
                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
 
@@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
 
        i40e_queue_pair_disable_irq(vsi, queue_pair);
        err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+       i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
        i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
        i40e_queue_pair_clean_rings(vsi, queue_pair);
        i40e_queue_pair_reset_stats(vsi, queue_pair);
index 69e67eb..b97c95f 100644 (file)
@@ -1457,14 +1457,6 @@ err:
        return -ENOMEM;
 }
 
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
-       unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
-       rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
-       return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
 {
        memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 
        rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
+       rx_ring->rx_bi =
+               kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+       if (!rx_ring->rx_bi)
+               return -ENOMEM;
+
        return 0;
 }
 
index 41f86e9..768290d 100644 (file)
@@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                  u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
index 7b3f30b..388c3d3 100644 (file)
@@ -1404,6 +1404,10 @@ struct i40e_lldp_variables {
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 
 /* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT         49
+#define I40E_X722_L3_SRC_MASK          (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT         41
+#define I40E_X722_L3_DST_MASK          (0x3ULL << I40E_X722_L3_DST_SHIFT)
 #define I40E_L3_SRC_SHIFT              47
 #define I40E_L3_SRC_MASK               (0x3ULL << I40E_L3_SRC_SHIFT)
 #define I40E_L3_V6_SRC_SHIFT           43
index 7e9f6a6..72ddcef 100644 (file)
@@ -1536,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
        if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
                return true;
 
-       /* If the VFs have been disabled, this means something else is
-        * resetting the VF, so we shouldn't continue.
-        */
-       if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+       /* Bail out if VFs are disabled. */
+       if (test_bit(__I40E_VF_DISABLE, pf->state))
+               return true;
+
+       /* If VF is being reset already we don't need to continue. */
+       if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
                return true;
 
        i40e_trigger_vf_reset(vf, flr);
@@ -1576,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
        i40e_cleanup_reset_vf(vf);
 
        i40e_flush(hw);
-       clear_bit(__I40E_VF_DISABLE, pf->state);
+       clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
 
        return true;
 }
@@ -1609,8 +1611,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
                return false;
 
        /* Begin reset on all VFs at once */
-       for (v = 0; v < pf->num_alloc_vfs; v++)
-               i40e_trigger_vf_reset(&pf->vf[v], flr);
+       for (v = 0; v < pf->num_alloc_vfs; v++) {
+               vf = &pf->vf[v];
+               /* If VF is being reset no need to trigger reset again */
+               if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+                       i40e_trigger_vf_reset(&pf->vf[v], flr);
+       }
 
        /* HW requires some time to make sure it can flush the FIFO for a VF
         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1626,9 +1632,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
                 */
                while (v < pf->num_alloc_vfs) {
                        vf = &pf->vf[v];
-                       reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
-                       if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
-                               break;
+                       if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+                               reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+                               if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+                                       break;
+                       }
 
                        /* If the current VF has finished resetting, move on
                         * to the next VF in sequence.
@@ -1656,6 +1664,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
                if (pf->vf[v].lan_vsi_idx == 0)
                        continue;
 
+               /* If VF is reset in another thread just continue */
+               if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+                       continue;
+
                i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
        }
 
@@ -1667,6 +1679,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
                if (pf->vf[v].lan_vsi_idx == 0)
                        continue;
 
+               /* If VF is reset in another thread just continue */
+               if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+                       continue;
+
                i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
        }
 
@@ -1676,8 +1692,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
        mdelay(50);
 
        /* Finish the reset on each VF */
-       for (v = 0; v < pf->num_alloc_vfs; v++)
+       for (v = 0; v < pf->num_alloc_vfs; v++) {
+               /* If VF is reset in another thread just continue */
+               if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+                       continue;
+
                i40e_cleanup_reset_vf(&pf->vf[v]);
+       }
 
        i40e_flush(hw);
        clear_bit(__I40E_VF_DISABLE, pf->state);
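
The VF-reset hunks replace the global disable gate with a per-VF RESETTING bit claimed via test_and_set_bit(), so resets of different VFs no longer serialize on one flag and a second reset of the same VF bails out early. A sketch of that claim-or-skip pattern with C11 atomics standing in for the kernel bitops (types and names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vf {
	int id;
	atomic_bool resetting;  /* models the per-VF RESETTING state bit */
};

/* Returns true if this caller now owns the reset; false if one is already running. */
static bool try_begin_reset(struct vf *vf)
{
	bool expected = false;

	/* test_and_set_bit() analogue: claim the flag only if it was clear. */
	return atomic_compare_exchange_strong(&vf->resetting, &expected, true);
}

static void finish_reset(struct vf *vf)
{
	atomic_store(&vf->resetting, false);  /* clear_bit() analogue */
}

int main(void)
{
	struct vf vf = { .id = 3 };

	if (try_begin_reset(&vf)) {
		printf("VF %d: reset in progress\n", vf.id);
		if (!try_begin_reset(&vf))
			printf("VF %d: reset already running, skipping\n", vf.id);
		finish_reset(&vf);
	}
	return 0;
}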
index a554d0a..358bbdb 100644 (file)
@@ -39,6 +39,7 @@ enum i40e_vf_states {
        I40E_VF_STATE_MC_PROMISC,
        I40E_VF_STATE_UC_PROMISC,
        I40E_VF_STATE_PRE_ENABLE,
+       I40E_VF_STATE_RESETTING
 };
 
 /* VF capabilities */
index 6d4009e..cd7b52f 100644 (file)
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"
 
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
-       unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
-       rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
-       return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
 {
        memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
 }
 
 /**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+       size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+                                         sizeof(*rx_ring->rx_bi);
+       void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+       if (!sw_ring)
+               return -ENOMEM;
+
+       if (pool_present) {
+               kfree(rx_ring->rx_bi);
+               rx_ring->rx_bi = NULL;
+               rx_ring->rx_bi_zc = sw_ring;
+       } else {
+               kfree(rx_ring->rx_bi_zc);
+               rx_ring->rx_bi_zc = NULL;
+               rx_ring->rx_bi = sw_ring;
+       }
+       return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+       struct i40e_ring *rx_ring;
+       unsigned long q;
+
+       for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+               rx_ring = vsi->rx_rings[q];
+               if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+/**
  * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
  * certain ring/qid
  * @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
                if (err)
                        return err;
 
+               err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+               if (err)
+                       return err;
+
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
        xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
 
        if (if_running) {
+               err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+               if (err)
+                       return err;
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
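
The xsk hunk sizes the per-ring software buffer array for either the zero-copy descriptor or the normal rx buffer, reallocating whenever the AF_XDP pool is attached or detached. A hedged sketch of choosing the element size at (re)allocation time, with calloc in place of kcalloc and invented struct names:

#include <stdio.h>
#include <stdlib.h>

struct rx_buf   { void *page; unsigned int offset; };   /* normal-path element */
struct xdp_desc { void *data; void *data_end; };        /* zero-copy-path element */

struct rx_ring {
	unsigned int count;
	struct rx_buf *bi;        /* used when no pool is attached */
	struct xdp_desc **bi_zc;  /* used when an AF_XDP pool is attached */
};

/* Allocate the new array first; only on success free and swap the old one. */
static int realloc_sw_ring(struct rx_ring *ring, int pool_present)
{
	size_t elem = pool_present ? sizeof(*ring->bi_zc) : sizeof(*ring->bi);
	void *sw = calloc(ring->count, elem);

	if (!sw)
		return -1;

	if (pool_present) {
		free(ring->bi);
		ring->bi = NULL;
		ring->bi_zc = sw;
	} else {
		free(ring->bi_zc);
		ring->bi_zc = NULL;
		ring->bi = sw;
	}
	return 0;
}

int main(void)
{
	struct rx_ring ring = { .count = 512 };

	if (realloc_sw_ring(&ring, 1) == 0)
		printf("ring now carries zero-copy descriptors\n");
	if (realloc_sw_ring(&ring, 0) == 0)
		printf("ring back to normal rx buffers\n");
	free(ring.bi);
	return 0;
}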
index bb96298..821df24 100644 (file)
@@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
 int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
 
 #endif /* _I40E_XSK_H_ */
index 59aab40..f5961bd 100644 (file)
@@ -485,7 +485,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
        len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
 
        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
-               dev_kfree_skb_any(skb);
                netdev_err(dev, "tx ring full\n");
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
index 9809f55..9ec5f38 100644 (file)
@@ -815,6 +815,7 @@ free_flowid:
        cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
                            txsc->hw_flow_id, false);
 fail:
+       kfree(txsc);
        return ERR_PTR(ret);
 }
 
@@ -870,6 +871,7 @@ free_flowid:
        cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
                            rxsc->hw_flow_id, false);
 fail:
+       kfree(rxsc);
        return ERR_PTR(ret);
 }
 
index 4fba7cb..7cd3815 100644 (file)
@@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev)
                        eth->irq[i] = platform_get_irq(pdev, i);
                if (eth->irq[i] < 0) {
                        dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
-                       return -ENXIO;
+                       err = -ENXIO;
+                       goto err_wed_exit;
                }
        }
        for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
                eth->clks[i] = devm_clk_get(eth->dev,
                                            mtk_clks_source_name[i]);
                if (IS_ERR(eth->clks[i])) {
-                       if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
-                               return -EPROBE_DEFER;
+                       if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+                               err = -EPROBE_DEFER;
+                               goto err_wed_exit;
+                       }
                        if (eth->soc->required_clks & BIT(i)) {
                                dev_err(&pdev->dev, "clock %s not found\n",
                                        mtk_clks_source_name[i]);
-                               return -EINVAL;
+                               err = -EINVAL;
+                               goto err_wed_exit;
                        }
                        eth->clks[i] = NULL;
                }
@@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev)
 
        err = mtk_hw_init(eth);
        if (err)
-               return err;
+               goto err_wed_exit;
 
        eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
 
@@ -4179,6 +4183,8 @@ err_free_dev:
        mtk_free_dev(eth);
 err_deinit_hw:
        mtk_hw_deinit(eth);
+err_wed_exit:
+       mtk_wed_exit();
 
        return err;
 }
@@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev)
                phylink_disconnect_phy(mac->phylink);
        }
 
+       mtk_wed_exit();
        mtk_hw_deinit(eth);
 
        netif_napi_del(&eth->tx_napi);
index ae00e57..2d8ca99 100644 (file)
@@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
        return 0;
 }
 
-static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
-{
-       return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
-              FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
-}
-
 static bool
 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
                     struct mtk_foe_entry *data)
index 099b6e0..65e01bf 100644 (file)
@@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
 
        pdev = of_find_device_by_node(np);
        if (!pdev)
-               return;
+               goto err_of_node_put;
 
        get_device(&pdev->dev);
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return;
+               goto err_put_device;
 
        regs = syscon_regmap_lookup_by_phandle(np, NULL);
        if (IS_ERR(regs))
-               return;
+               goto err_put_device;
 
        rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
 
@@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
 
        hw_list[index] = hw;
 
+       mutex_unlock(&hw_lock);
+
+       return;
+
 unlock:
        mutex_unlock(&hw_lock);
+err_put_device:
+       put_device(&pdev->dev);
+err_of_node_put:
+       of_node_put(np);
 }
 
 void mtk_wed_exit(void)
@@ -1146,6 +1154,7 @@ void mtk_wed_exit(void)
                hw_list[i] = NULL;
                debugfs_remove(hw->debugfs_dir);
                put_device(hw->dev);
+               of_node_put(hw->node);
                kfree(hw);
        }
 }
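
The mtk_wed hunks make every early exit in the add path drop the references it already took (put_device(), of_node_put()) instead of returning directly. A compact sketch of that acquire-in-order, release-in-reverse goto ladder, using invented get/put helpers to model the reference counts:

#include <stdio.h>

struct node { int refs; };
struct dev  { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }
static void dev_get(struct dev *d)   { d->refs++; }
static void dev_put(struct dev *d)   { d->refs--; }

/* Each failure jumps to the label that undoes exactly what was acquired so far;
 * on success the references stay held until the matching teardown path runs. */
static int add_hw(struct node *np, struct dev *pdev, int irq_ok, int regs_ok)
{
	node_get(np);

	if (!pdev)
		goto err_node_put;
	dev_get(pdev);

	if (!irq_ok)
		goto err_dev_put;
	if (!regs_ok)
		goto err_dev_put;

	printf("hw registered\n");
	return 0;

err_dev_put:
	dev_put(pdev);
err_node_put:
	node_put(np);
	return -1;
}

int main(void)
{
	struct node np = { 0 };
	struct dev pdev = { 0 };

	add_hw(&np, &pdev, 0, 1);  /* fails at the irq step */
	printf("leaked refs: node=%d dev=%d\n", np.refs, pdev.refs);
	return 0;
}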
index 0377392..46ba4c2 100644 (file)
@@ -2004,7 +2004,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
        ctx->dev = dev;
        /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
        atomic_set(&ctx->num_inflight, 1);
-       init_waitqueue_head(&ctx->wait);
+       init_completion(&ctx->inflight_done);
 }
 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
 
@@ -2018,8 +2018,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
  */
 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
 {
-       atomic_dec(&ctx->num_inflight);
-       wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+       if (!atomic_dec_and_test(&ctx->num_inflight))
+               wait_for_completion(&ctx->inflight_done);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
 
@@ -2032,7 +2032,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
        status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
        work->user_callback(status, work);
        if (atomic_dec_and_test(&ctx->num_inflight))
-               wake_up(&ctx->wait);
+               complete(&ctx->inflight_done);
 }
 
 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -2050,7 +2050,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
        ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
                       mlx5_cmd_exec_cb_handler, work, false);
        if (ret && atomic_dec_and_test(&ctx->num_inflight))
-               wake_up(&ctx->wait);
+               complete(&ctx->inflight_done);
 
        return ret;
 }
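
The mlx5 hunk above swaps the wait-queue poll ("wait until num_inflight reaches zero") for a completion that the last in-flight command signals, so cleanup simply blocks on wait_for_completion(). A userspace sketch of that last-one-out-signals pattern, with C11 atomics and a pthread condition variable standing in for struct completion (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct async_ctx {
	atomic_int inflight;          /* starts at 1: the cleanup path's own reference */
	pthread_mutex_t lock;
	pthread_cond_t  done_cond;
	int done;                     /* models struct completion */
};

static void complete_done(struct async_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->done = 1;
	pthread_cond_signal(&ctx->done_cond);
	pthread_mutex_unlock(&ctx->lock);
}

/* Called when one async command finishes: the last decrement signals completion. */
static void put_inflight(struct async_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->inflight, 1) == 1)
		complete_done(ctx);
}

/* Cleanup drops its own reference; if commands are still pending, wait for them. */
static void cleanup(struct async_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->inflight, 1) == 1)
		return;  /* nothing was in flight */

	pthread_mutex_lock(&ctx->lock);
	while (!ctx->done)
		pthread_cond_wait(&ctx->done_cond, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct async_ctx ctx = {
		.inflight = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cond = PTHREAD_COND_INITIALIZER,
	};

	atomic_fetch_add(&ctx.inflight, 1);  /* one command issued */
	put_inflight(&ctx);                  /* ...and completed */
	cleanup(&ctx);                       /* returns immediately */
	printf("async context drained\n");
	return 0;
}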
index 5bce554..cc7efde 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "en.h"
 #include "en_stats.h"
+#include "en/txrx.h"
 #include <linux/ptp_classify.h>
 
 #define MLX5E_PTP_CHANNEL_IX 0
@@ -68,6 +69,14 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
                fk.ports.dst == htons(PTP_EV_PORT));
 }
 
+static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
+{
+       if (!sq->ptpsq)
+               return true;
+
+       return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
+}
+
 int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
                   u8 lag_port, struct mlx5e_ptp **cp);
 void mlx5e_ptp_close(struct mlx5e_ptp *c);
index 10c9a8a..2e42d7c 100644 (file)
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow {
        struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5e_tc_flow *peer_flow;
        struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+       struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
        struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
        struct list_head hairpin; /* flows sharing the same hairpin */
        struct list_head peer;    /* flows with peer flow */
@@ -111,6 +112,7 @@ struct mlx5e_tc_flow {
        struct completion del_hw_done;
        struct mlx5_flow_attr *attr;
        struct list_head attrs;
+       u32 chain_mapping;
 };
 
 struct mlx5_flow_handle *
index 4456ad5..cb164b6 100644 (file)
@@ -58,6 +58,12 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
 
 static inline bool
+mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
+{
+       return (*fifo->pc - *fifo->cc) < fifo->mask;
+}
+
+static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 {
        return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
index 2a8fd70..a715601 100644 (file)
@@ -101,7 +101,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
        struct xfrm_replay_state_esn *replay_esn;
        u32 seq_bottom = 0;
        u8 overlap;
-       u32 *esn;
 
        if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
                sa_entry->esn_state.trigger = 0;
@@ -116,11 +115,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 
        sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
                                                    htonl(seq_bottom));
-       esn = &sa_entry->esn_state.esn;
 
        sa_entry->esn_state.trigger = 1;
        if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
-               ++(*esn);
                sa_entry->esn_state.overlap = 0;
                return true;
        } else if (unlikely(!overlap &&
index 4197006..2ef36cb 100644 (file)
@@ -432,7 +432,7 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
                                     bool active)
 {
        struct mlx5_core_dev *mdev = macsec->mdev;
-       struct mlx5_macsec_obj_attrs attrs;
+       struct mlx5_macsec_obj_attrs attrs = {};
        int err = 0;
 
        if (rx_sa->active != active)
@@ -444,7 +444,7 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
                return 0;
        }
 
-       attrs.sci = rx_sa->sci;
+       attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
        attrs.enc_key_id = rx_sa->enc_key_id;
        err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
        if (err)
@@ -999,11 +999,11 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
        }
 
        rx_sa = rx_sc->rx_sa[assoc_num];
-       if (rx_sa) {
+       if (!rx_sa) {
                netdev_err(ctx->netdev,
-                          "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
+                          "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
                           sci, assoc_num);
-               err = -EEXIST;
+               err = -EINVAL;
                goto out;
        }
 
@@ -1055,11 +1055,11 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
        }
 
        rx_sa = rx_sc->rx_sa[assoc_num];
-       if (rx_sa) {
+       if (!rx_sa) {
                netdev_err(ctx->netdev,
-                          "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
+                          "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
                           sci, assoc_num);
-               err = -EEXIST;
+               err = -EINVAL;
                goto out;
        }
 
@@ -1846,25 +1846,16 @@ err_hash:
 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
 {
        struct mlx5e_macsec *macsec = priv->macsec;
-       struct mlx5_core_dev *mdev = macsec->mdev;
+       struct mlx5_core_dev *mdev = priv->mdev;
 
        if (!macsec)
                return;
 
        mlx5_notifier_unregister(mdev, &macsec->nb);
-
        mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
-
-       /* Cleanup workqueue */
        destroy_workqueue(macsec->wq);
-
        mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
-
-       priv->macsec = NULL;
-
        rhashtable_destroy(&macsec->sci_hash);
-
        mutex_destroy(&macsec->lock);
-
        kfree(macsec);
 }
index 13dc628..1ac0cf0 100644 (file)
@@ -1180,7 +1180,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
        rx_rule->rule[0] = rule;
 
        /* Rx crypto table without SCI rule */
-       if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) {
+       if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
                memset(spec, 0, sizeof(struct mlx5_flow_spec));
                memset(&dest, 0, sizeof(struct mlx5_flow_destination));
                memset(&flow_act, 0, sizeof(flow_act));
index 70a7a61..dd6fea9 100644 (file)
@@ -1405,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_spec *spec)
 {
+       struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+       struct mlx5e_mod_hdr_handle *mh = NULL;
        struct mlx5_flow_attr *slow_attr;
        struct mlx5_flow_handle *rule;
+       bool fwd_and_modify_cap;
+       u32 chain_mapping = 0;
+       int err;
 
        slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
        if (!slow_attr)
@@ -1417,13 +1422,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        slow_attr->esw_attr->split_count = 0;
        slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
 
+       fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
+       if (!fwd_and_modify_cap)
+               goto skip_restore;
+
+       err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
+       if (err)
+               goto err_get_chain;
+
+       err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+                                       CHAIN_TO_REG, chain_mapping);
+       if (err)
+               goto err_reg_set;
+
+       mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
+                                 MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
+       if (IS_ERR(mh)) {
+               err = PTR_ERR(mh);
+               goto err_attach;
+       }
+
+       slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+       slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+
+skip_restore:
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
-       if (!IS_ERR(rule))
-               flow_flag_set(flow, SLOW);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               goto err_offload;
+       }
 
+       flow->slow_mh = mh;
+       flow->chain_mapping = chain_mapping;
+       flow_flag_set(flow, SLOW);
+
+       mlx5e_mod_hdr_dealloc(&mod_acts);
        kfree(slow_attr);
 
        return rule;
+
+err_offload:
+       if (fwd_and_modify_cap)
+               mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
+err_attach:
+err_reg_set:
+       if (fwd_and_modify_cap)
+               mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
+err_get_chain:
+       mlx5e_mod_hdr_dealloc(&mod_acts);
+       kfree(slow_attr);
+       return ERR_PTR(err);
 }
 
 void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
@@ -1441,7 +1489,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->esw_attr->split_count = 0;
        slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+       if (flow->slow_mh) {
+               slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+               slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+       }
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+       if (flow->slow_mh) {
+               mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+               mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
+               flow->chain_mapping = 0;
+               flow->slow_mh = NULL;
+       }
        flow_flag_clear(flow, SLOW);
        kfree(slow_attr);
 }
@@ -3575,6 +3633,10 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
        attr2->action = 0;
        attr2->flags = 0;
        attr2->parse_attr = parse_attr;
+       attr2->esw_attr->out_count = 0;
+       attr2->esw_attr->split_count = 0;
+       attr2->dest_chain = 0;
+       attr2->dest_ft = NULL;
        return attr2;
 }
 
@@ -4008,6 +4070,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5_flow_attr *attr = flow->attr;
        struct mlx5_esw_flow_attr *esw_attr;
+       struct net_device *filter_dev;
        int err;
 
        err = flow_action_supported(flow_action, extack);
@@ -4016,6 +4079,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
        esw_attr = attr->esw_attr;
        parse_attr = attr->parse_attr;
+       filter_dev = parse_attr->filter_dev;
        parse_state = &parse_attr->parse_state;
        mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
        parse_state->ct_priv = get_ct_priv(priv);
@@ -4025,13 +4089,21 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
                return err;
 
        /* Forward to/from internal port can only have 1 dest */
-       if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
+       if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
            esw_attr->out_count > 1) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Rules with internal port can have only one destination");
                return -EOPNOTSUPP;
        }
 
+       /* Forward from tunnel/internal port to internal port is not supported */
+       if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
+           esw_attr->dest_int_port) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Forwarding from tunnel/internal port to internal port is not supported");
+               return -EOPNOTSUPP;
+       }
+
        err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
        if (err)
                return err;
index bf2232a..6adca01 100644 (file)
@@ -392,6 +392,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        if (unlikely(sq->ptpsq)) {
                mlx5e_skb_cb_hwtstamp_init(skb);
                mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+               if (!netif_tx_queue_stopped(sq->txq) &&
+                   !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
+                       netif_tx_stop_queue(sq->txq);
+                       sq->stats->stopped++;
+               }
                skb_get(skb);
        }
 
@@ -868,6 +873,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+           mlx5e_ptpsq_fifo_has_room(sq) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
                stats->wake++;
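
The PTP hunks stop the TX queue once the PTP skb FIFO is about to fill and require it to have room again before the queue is woken; occupancy is just producer minus consumer compared against the mask. A short sketch of that ring-occupancy test with free-running 16-bit counters (all names invented):

#include <stdint.h>
#include <stdio.h>

struct skb_fifo {
	uint16_t pc;    /* producer counter, free running */
	uint16_t cc;    /* consumer counter, free running */
	uint16_t mask;  /* fifo size - 1, size is a power of two */
};

/* Room is left as long as the occupancy stays below the mask. */
static int fifo_has_room(const struct skb_fifo *f)
{
	return (uint16_t)(f->pc - f->cc) < f->mask;
}

int main(void)
{
	struct skb_fifo fifo = { .mask = 7 };   /* 8-entry fifo */
	int queue_stopped = 0;
	int i;

	for (i = 0; i < 7; i++) {
		fifo.pc++;                       /* push one timestamped skb */
		if (!queue_stopped && !fifo_has_room(&fifo)) {
			queue_stopped = 1;       /* netif_tx_stop_queue() analogue */
			printf("queue stopped at occupancy %u\n",
			       (unsigned)(uint16_t)(fifo.pc - fifo.cc));
		}
	}

	fifo.cc++;                               /* one completion frees a slot */
	if (queue_stopped && fifo_has_room(&fifo)) {
		queue_stopped = 0;               /* netif_tx_wake_queue() analogue */
		printf("queue woken\n");
	}
	return 0;
}

Because both counters wrap naturally at 16 bits, the subtraction gives the correct occupancy without any explicit modulo arithmetic.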
index e8896f3..07c5839 100644 (file)
@@ -358,6 +358,23 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
                err = -ETIMEDOUT;
        }
 
+       do {
+               err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &reg16);
+               if (err)
+                       return err;
+               if (reg16 == dev_id)
+                       break;
+               msleep(20);
+       } while (!time_after(jiffies, timeout));
+
+       if (reg16 == dev_id) {
+               mlx5_core_info(dev, "Firmware responds to PCI config cycles again\n");
+       } else {
+               mlx5_core_err(dev, "Firmware is not responsive (0x%04x) after %llu ms\n",
+                             reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
+               err = -ETIMEDOUT;
+       }
+
 restore:
        list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
                pci_cfg_access_unlock(sdev);
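
The loop added above polls the PCI device ID until the firmware answers config cycles again or the timeout expires. The userspace sketch below shows the same bounded-polling pattern with a monotonic-clock deadline; the constants and the read_device_id() helper are assumptions made for the example, not kernel APIs.

/*
 * Hypothetical userspace sketch of the bounded-polling pattern: re-read a
 * status source until the expected value appears or a deadline passes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define EXPECTED_ID      0x1017u        /* assumed value for the example */
#define POLL_TIMEOUT_MS  1000
#define POLL_INTERVAL_MS 20

static uint16_t read_device_id(void)
{
        /* Stand-in for pci_read_config_word(); always "recovers" here. */
        return EXPECTED_ID;
}

static bool wait_for_device_id(void)
{
        struct timespec start, now;
        long elapsed_ms = 0;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (read_device_id() == EXPECTED_ID)
                        return true;            /* device responded */
                usleep(POLL_INTERVAL_MS * 1000);
                clock_gettime(CLOCK_MONOTONIC, &now);
                elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                             (now.tv_nsec - start.tv_nsec) / 1000000;
        } while (elapsed_ms < POLL_TIMEOUT_MS);

        return false;                           /* timed out */
}

int main(void)
{
        printf("device responsive: %s\n", wait_for_device_id() ? "yes" : "no");
        return 0;
}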
index baa8092..c971ff0 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/transobj.h>
+#include "clock.h"
 #include "aso.h"
 #include "wq.h"
 
@@ -179,6 +180,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
 {
        void *in, *sqc, *wq;
        int inlen, err;
+       u8 ts_format;
 
        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
@@ -195,6 +197,11 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
        MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
 
+       ts_format = mlx5_is_real_time_sq(mdev) ?
+                       MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+                       MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+       MLX5_SET(sqc, sqc, ts_format, ts_format);
+
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.hw_objs.bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
index 839a01d..8ff1631 100644 (file)
@@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!mpfs)
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!mpfs)
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!mpfs)
                return 0;
 
        mutex_lock(&mpfs->lock);
index 0b459d8..283c4cc 100644 (file)
@@ -1872,6 +1872,10 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
 
        err = mlx5_load_one(dev, false);
 
+       if (!err)
+               devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
+                                                    DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+
        mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
                       !err ? "recovered" : "Failed");
 }
index ddfaf78..91ff19f 100644 (file)
@@ -1200,7 +1200,8 @@ free_rule:
        }
 
 remove_from_nic_tbl:
-       mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+       if (!nic_matcher->rules)
+               mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
 
 free_hw_ste:
        mlx5dr_domain_nic_unlock(nic_dmn);
index 4685200..e6acd1e 100644 (file)
@@ -6851,7 +6851,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
        char banner[sizeof(version)];
        struct ksz_switch *sw = NULL;
 
-       result = pci_enable_device(pdev);
+       result = pcim_enable_device(pdev);
        if (result)
                return result;
 
index e58a27f..fea4254 100644 (file)
@@ -656,7 +656,15 @@ void lan966x_stats_get(struct net_device *dev,
        stats->rx_dropped = dev->stats.rx_dropped +
                lan966x->stats[idx + SYS_COUNT_RX_LONG] +
                lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
-               lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+               lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+               lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];
 
        for (i = 0; i < LAN966X_NUM_TC; i++) {
                stats->rx_dropped +=
index 7e4061c..a42035c 100644 (file)
@@ -309,6 +309,7 @@ static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
                lan966x, FDMA_CH_DB_DISCARD);
 
        tx->activated = false;
+       tx->last_in_use = -1;
 }
 
 static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -687,17 +688,14 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 
 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 {
-       void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
-       dma_addr_t rx_dma, tx_dma;
+       dma_addr_t rx_dma;
+       void *rx_dcbs;
        u32 size;
        int err;
 
        /* Store these for later to free them */
        rx_dma = lan966x->rx.dma;
-       tx_dma = lan966x->tx.dma;
        rx_dcbs = lan966x->rx.dcbs;
-       tx_dcbs = lan966x->tx.dcbs;
-       tx_dcbs_buf = lan966x->tx.dcbs_buf;
 
        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);
@@ -715,17 +713,6 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
        size = ALIGN(size, PAGE_SIZE);
        dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
 
-       lan966x_fdma_tx_disable(&lan966x->tx);
-       err = lan966x_fdma_tx_alloc(&lan966x->tx);
-       if (err)
-               goto restore_tx;
-
-       size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
-       size = ALIGN(size, PAGE_SIZE);
-       dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
-
-       kfree(tx_dcbs_buf);
-
        lan966x_fdma_wakeup_netdev(lan966x);
        napi_enable(&lan966x->napi);
 
@@ -735,11 +722,6 @@ restore:
        lan966x->rx.dcbs = rx_dcbs;
        lan966x_fdma_rx_start(&lan966x->rx);
 
-restore_tx:
-       lan966x->tx.dma = tx_dma;
-       lan966x->tx.dcbs = tx_dcbs;
-       lan966x->tx.dcbs_buf = tx_dcbs_buf;
-
        return err;
 }
 
index e66e548..71301db 100644 (file)
@@ -716,16 +716,26 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
        return val;
 }
 
-static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
+static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf)
 {
        struct nfp_nsp *nsp;
        char hwinfo[32];
+       bool sp_indiff;
        int err;
 
        nsp = nfp_nsp_open(pf->cpp);
        if (IS_ERR(nsp))
-               return PTR_ERR(nsp);
+               return;
+
+       if (!nfp_nsp_has_hwinfo_set(nsp))
+               goto end;
 
+       sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
+                   (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
+
+       /* No need to clean `sp_indiff` in the driver; the management firmware
+        * will do it when the application firmware is unloaded.
+        */
        snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
        err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
        /* Not a fatal error, no need to return error to stop driver from loading */
@@ -739,21 +749,8 @@ static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
                pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
        }
 
+end:
        nfp_nsp_close(nsp);
-       return 0;
-}
-
-static int nfp_pf_nsp_cfg(struct nfp_pf *pf)
-{
-       bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
-                        (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
-
-       return nfp_pf_cfg_hwinfo(pf, sp_indiff);
-}
-
-static void nfp_pf_nsp_clean(struct nfp_pf *pf)
-{
-       nfp_pf_cfg_hwinfo(pf, false);
 }
 
 static int nfp_pci_probe(struct pci_dev *pdev,
@@ -856,13 +853,11 @@ static int nfp_pci_probe(struct pci_dev *pdev,
                goto err_fw_unload;
        }
 
-       err = nfp_pf_nsp_cfg(pf);
-       if (err)
-               goto err_fw_unload;
+       nfp_pf_cfg_hwinfo(pf);
 
        err = nfp_net_pci_probe(pf);
        if (err)
-               goto err_nsp_clean;
+               goto err_fw_unload;
 
        err = nfp_hwmon_register(pf);
        if (err) {
@@ -874,8 +869,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 err_net_remove:
        nfp_net_pci_remove(pf);
-err_nsp_clean:
-       nfp_pf_nsp_clean(pf);
 err_fw_unload:
        kfree(pf->rtbl);
        nfp_mip_close(pf->mip);
@@ -915,7 +908,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
 
        nfp_net_pci_remove(pf);
 
-       nfp_pf_nsp_clean(pf);
        vfree(pf->dumpspec);
        kfree(pf->rtbl);
        nfp_mip_close(pf->mip);
index 5d58fd9..19d4848 100644 (file)
@@ -2817,11 +2817,15 @@ err_out:
         * than the full array, but leave the qcq shells in place
         */
        for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
-               lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-               ionic_qcq_free(lif, lif->txqcqs[i]);
+               if (lif->txqcqs && lif->txqcqs[i]) {
+                       lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+                       ionic_qcq_free(lif, lif->txqcqs[i]);
+               }
 
-               lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-               ionic_qcq_free(lif, lif->rxqcqs[i]);
+               if (lif->rxqcqs && lif->rxqcqs[i]) {
+                       lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+                       ionic_qcq_free(lif, lif->rxqcqs[i]);
+               }
        }
 
        if (err)
index d1e1aa1..7022fb2 100644 (file)
@@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
        bool was_enabled = efx->port_enabled;
        int rc;
 
+#ifdef CONFIG_SFC_SRIOV
+       /* If this function is a VF and we have access to the parent PF,
+        * then use the PF control path to attempt to change the VF MAC address.
+        */
+       if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+               struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+               struct efx_ef10_nic_data *nic_data = efx->nic_data;
+               u8 mac[ETH_ALEN];
+
+               /* net_dev->dev_addr can be zeroed by efx_net_stop in
+                * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+                */
+               ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+               rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+               if (!rc)
+                       return 0;
+
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Updating VF mac via PF failed (%d), setting directly\n",
+                         rc);
+       }
+#endif
+
        efx_device_detach_sync(efx);
        efx_net_stop(efx->net_dev);
 
@@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
                efx_net_open(efx->net_dev);
        efx_device_attach_if_not_resetting(efx);
 
-#ifdef CONFIG_SFC_SRIOV
-       if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
-               struct efx_ef10_nic_data *nic_data = efx->nic_data;
-               struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
-               if (rc == -EPERM) {
-                       struct efx_nic *efx_pf;
-
-                       /* Switch to PF and change MAC address on vport */
-                       efx_pf = pci_get_drvdata(pci_dev_pf);
-
-                       rc = efx_ef10_sriov_set_vf_mac(efx_pf,
-                                                      nic_data->vf_index,
-                                                      efx->net_dev->dev_addr);
-               } else if (!rc) {
-                       struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
-                       struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
-                       unsigned int i;
-
-                       /* MAC address successfully changed by VF (with MAC
-                        * spoofing) so update the parent PF if possible.
-                        */
-                       for (i = 0; i < efx_pf->vf_count; ++i) {
-                               struct ef10_vf *vf = nic_data->vf + i;
-
-                               if (vf->efx == efx) {
-                                       ether_addr_copy(vf->mac,
-                                                       efx->net_dev->dev_addr);
-                                       return 0;
-                               }
-                       }
-               }
-       } else
-#endif
        if (rc == -EPERM) {
                netif_err(efx, drv, efx->net_dev,
                          "Cannot change MAC address; use sfboot to enable"
index be72e71..5f201a5 100644 (file)
@@ -162,9 +162,9 @@ struct efx_filter_spec {
        u32     priority:2;
        u32     flags:6;
        u32     dmaq_id:12;
-       u32     vport_id;
        u32     rss_context;
-       __be16  outer_vid __aligned(4); /* allow jhash2() of match values */
+       u32     vport_id;
+       __be16  outer_vid;
        __be16  inner_vid;
        u8      loc_mac[ETH_ALEN];
        u8      rem_mac[ETH_ALEN];
index 4826e6a..9220afe 100644 (file)
@@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
             (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
                return false;
 
-       return memcmp(&left->outer_vid, &right->outer_vid,
+       return memcmp(&left->vport_id, &right->vport_id,
                      sizeof(struct efx_filter_spec) -
-                     offsetof(struct efx_filter_spec, outer_vid)) == 0;
+                     offsetof(struct efx_filter_spec, vport_id)) == 0;
 }
 
 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 {
-       BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
-       return jhash2((const u32 *)&spec->outer_vid,
+       BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+       return jhash2((const u32 *)&spec->vport_id,
                      (sizeof(struct efx_filter_spec) -
-                      offsetof(struct efx_filter_spec, outer_vid)) / 4,
+                      offsetof(struct efx_filter_spec, vport_id)) / 4,
                      0);
 }
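
Both hunks above rely on the same idiom: hash and compare only the tail of the structure, starting at a chosen anchor member, so everything before the anchor is excluded from the match. Moving vport_id ahead of outer_vid keeps that region contiguous and 4-byte aligned, which jhash2() requires (the BUILD_BUG_ON checks this). The sketch below demonstrates the idiom with an invented struct and a byte-wise FNV-1a hash in place of jhash2(); it is an illustration, not sfc code.

/*
 * Minimal sketch of the "hash/compare everything from member X to the end
 * of the struct" idiom.  Struct layout, field names and hash are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct filter_spec {
        uint32_t match_flags;   /* excluded from the match */
        uint32_t vport_id;      /* anchor: everything from here is matched */
        uint16_t outer_vid;
        uint16_t inner_vid;
        uint8_t  loc_mac[6];
};

static uint32_t hash_tail(const struct filter_spec *spec)
{
        const uint8_t *p = (const uint8_t *)spec +
                           offsetof(struct filter_spec, vport_id);
        size_t len = sizeof(*spec) - offsetof(struct filter_spec, vport_id);
        uint32_t h = 2166136261u;       /* FNV-1a instead of jhash2() */

        while (len--)
                h = (h ^ *p++) * 16777619u;
        return h;
}

static int tail_equal(const struct filter_spec *a, const struct filter_spec *b)
{
        return memcmp(&a->vport_id, &b->vport_id,
                      sizeof(*a) - offsetof(struct filter_spec, vport_id)) == 0;
}

int main(void)
{
        struct filter_spec a, b;

        memset(&a, 0, sizeof(a));       /* zero padding for determinism */
        a.vport_id = 1;
        a.outer_vid = 7;
        memcpy(&b, &a, sizeof(a));

        printf("hash=%08x equal=%d\n", hash_tail(&a), tail_equal(&a, &b));
        return 0;
}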
 
index 2240f6d..9b46579 100644 (file)
@@ -1961,11 +1961,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
                        ret = PTR_ERR(priv->phydev);
                        dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
                        priv->phydev = NULL;
+                       mdiobus_unregister(bus);
                        return -ENODEV;
                }
 
                ret = phy_device_register(priv->phydev);
                if (ret) {
+                       phy_device_free(priv->phydev);
                        mdiobus_unregister(bus);
                        dev_err(priv->dev,
                                "phy_device_register err(%d)\n", ret);
index 1fa09b4..d2c6a5d 100644 (file)
@@ -1229,6 +1229,8 @@ static int ave_init(struct net_device *ndev)
 
        phy_support_asym_pause(phydev);
 
+       phydev->mac_managed_pm = true;
+
        phy_attached_info(phydev);
 
        return 0;
@@ -1756,6 +1758,10 @@ static int ave_resume(struct device *dev)
 
        ave_global_reset(ndev);
 
+       ret = phy_init_hw(ndev->phydev);
+       if (ret)
+               return ret;
+
        ave_ethtool_get_wol(ndev, &wol);
        wol.wolopts = priv->wolopts;
        __ave_ethtool_set_wol(ndev, &wol);
index f7269d7..6656d76 100644 (file)
@@ -1243,6 +1243,12 @@ static const struct rk_gmac_ops rk3588_ops = {
        .set_rgmii_speed = rk3588_set_gmac_speed,
        .set_rmii_speed = rk3588_set_gmac_speed,
        .set_clock_selection = rk3588_set_clock_selection,
+       .regs_valid = true,
+       .regs = {
+               0xfe1b0000, /* gmac0 */
+               0xfe1c0000, /* gmac1 */
+               0x0, /* sentinel */
+       },
 };
 
 #define RV1108_GRF_GMAC_CON0           0X0900
index 65c9677..8273e6a 100644 (file)
@@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
        if (priv->plat->tx_queues_to_use > 1)
                priv->phylink_config.mac_capabilities &=
                        ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+       priv->phylink_config.mac_managed_pm = true;
 
        phylink = phylink_create(&priv->phylink_config, fwnode,
                                 mode, &stmmac_phylink_mac_ops);
index 91f10f7..1c16548 100644 (file)
@@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp)
        void __iomem *erxregs      = hp->erxregs;
        void __iomem *bregs        = hp->bigmacregs;
        void __iomem *tregs        = hp->tcvregs;
-       const char *bursts;
+       const char *bursts = "64";
        u32 regtmp, rxcfg;
 
        /* If auto-negotiation timer is running, kill it. */
index 11f767a..eea777e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/ucs2_string.h>
+#include <linux/string.h>
 
 #include "hyperv_net.h"
 #include "netvsc_trace.h"
@@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
                if (resp->msg_len <=
                    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
                        memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
-                       memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+                       unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
                               data + RNDIS_HEADER_SIZE + sizeof(*req_id),
-                              resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
+                              resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+                              "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
                        if (request->request_msg.ndis_msg_type ==
                            RNDIS_MSG_QUERY && request->request_msg.msg.
                            query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
index 383ef18..42f2c88 100644 (file)
@@ -179,10 +179,10 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 static const struct ipa_resource ipa_resource_src[] = {
        [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
                .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
-                       .min = 1,       .max = 255,
+                       .min = 1,       .max = 63,
                },
                .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
-                       .min = 1,       .max = 255,
+                       .min = 1,       .max = 63,
                },
                .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
                        .min = 1,       .max = 63,
index 3461ad3..49537fc 100644 (file)
@@ -434,6 +434,9 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
        const struct ipa_reg *reg;
        u32 val;
 
+       if (ipa->version < IPA_VERSION_3_5_1)
+               return;
+
        reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
        val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
                             enter_idle_debounce_thresh);
index 116b277..0d002c3 100644 (file)
@@ -127,112 +127,80 @@ static const u32 ipa_reg_counter_cfg_fmask[] = {
 IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
 
 static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
                      0x00000400, 0x0020);
 
 static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
                      0x00000404, 0x0020);
 
 static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
                      0x00000408, 0x0020);
 
 static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
                      0x0000040c, 0x0020);
 
 static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
                      0x00000500, 0x0020);
 
 static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
                      0x00000504, 0x0020);
 
 static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
                      0x00000508, 0x0020);
 
 static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
-       [X_MIN_LIM]                                     = GENMASK(5, 0),
-                                               /* Bits 6-7 reserved */
-       [X_MAX_LIM]                                     = GENMASK(13, 8),
-                                               /* Bits 14-15 reserved */
-       [Y_MIN_LIM]                                     = GENMASK(21, 16),
-                                               /* Bits 22-23 reserved */
-       [Y_MAX_LIM]                                     = GENMASK(29, 24),
-                                               /* Bits 30-31 reserved */
+       [X_MIN_LIM]                                     = GENMASK(7, 0),
+       [X_MAX_LIM]                                     = GENMASK(15, 8),
+       [Y_MIN_LIM]                                     = GENMASK(23, 16),
+       [Y_MAX_LIM]                                     = GENMASK(31, 24),
 };
 
 IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
index 8f8f730..c5cfe85 100644 (file)
@@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
        }
        spin_unlock(&port->bc_queue.lock);
 
-       schedule_work(&port->bc_work);
+       queue_work(system_unbound_wq, &port->bc_work);
 
        if (err)
                goto free_nskb;
index b5f4df1..0052968 100644 (file)
@@ -117,6 +117,10 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
 
 static void nsim_bus_dev_release(struct device *dev)
 {
+       struct nsim_bus_dev *nsim_bus_dev;
+
+       nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev);
+       kfree(nsim_bus_dev);
 }
 
 static struct device_type nsim_bus_dev_type = {
@@ -291,6 +295,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
 
 err_nsim_bus_dev_id_free:
        ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+       put_device(&nsim_bus_dev->dev);
+       nsim_bus_dev = NULL;
 err_nsim_bus_dev_free:
        kfree(nsim_bus_dev);
        return ERR_PTR(err);
@@ -300,9 +306,8 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
 {
        /* Disallow using nsim_bus_dev */
        smp_store_release(&nsim_bus_dev->init, false);
-       device_unregister(&nsim_bus_dev->dev);
        ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
-       kfree(nsim_bus_dev);
+       device_unregister(&nsim_bus_dev->dev);
 }
 
 static struct device_driver nsim_driver = {
index 794fc0c..a7880c7 100644 (file)
@@ -309,8 +309,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
        if (IS_ERR(nsim_dev->ddir))
                return PTR_ERR(nsim_dev->ddir);
        nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
-       if (IS_ERR(nsim_dev->ports_ddir))
-               return PTR_ERR(nsim_dev->ports_ddir);
+       if (IS_ERR(nsim_dev->ports_ddir)) {
+               err = PTR_ERR(nsim_dev->ports_ddir);
+               goto err_ddir;
+       }
        debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
                            &nsim_dev->fw_update_status);
        debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
@@ -346,7 +348,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
        nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
        if (IS_ERR(nsim_dev->nodes_ddir)) {
                err = PTR_ERR(nsim_dev->nodes_ddir);
-               goto err_out;
+               goto err_ports_ddir;
        }
        debugfs_create_bool("fail_trap_drop_counter_get", 0600,
                            nsim_dev->ddir,
@@ -354,8 +356,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
        nsim_udp_tunnels_debugfs_create(nsim_dev);
        return 0;
 
-err_out:
+err_ports_ddir:
        debugfs_remove_recursive(nsim_dev->ports_ddir);
+err_ddir:
        debugfs_remove_recursive(nsim_dev->ddir);
        return err;
 }
@@ -442,7 +445,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     &params);
        if (err) {
                pr_err("Failed to register IPv4 top resource\n");
-               goto out;
+               goto err_out;
        }
 
        err = devl_resource_register(devlink, "fib", (u64)-1,
@@ -450,7 +453,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     NSIM_RESOURCE_IPV4, &params);
        if (err) {
                pr_err("Failed to register IPv4 FIB resource\n");
-               return err;
+               goto err_out;
        }
 
        err = devl_resource_register(devlink, "fib-rules", (u64)-1,
@@ -458,7 +461,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     NSIM_RESOURCE_IPV4, &params);
        if (err) {
                pr_err("Failed to register IPv4 FIB rules resource\n");
-               return err;
+               goto err_out;
        }
 
        /* Resources for IPv6 */
@@ -468,7 +471,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     &params);
        if (err) {
                pr_err("Failed to register IPv6 top resource\n");
-               goto out;
+               goto err_out;
        }
 
        err = devl_resource_register(devlink, "fib", (u64)-1,
@@ -476,7 +479,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     NSIM_RESOURCE_IPV6, &params);
        if (err) {
                pr_err("Failed to register IPv6 FIB resource\n");
-               return err;
+               goto err_out;
        }
 
        err = devl_resource_register(devlink, "fib-rules", (u64)-1,
@@ -484,7 +487,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     NSIM_RESOURCE_IPV6, &params);
        if (err) {
                pr_err("Failed to register IPv6 FIB rules resource\n");
-               return err;
+               goto err_out;
        }
 
        /* Resources for nexthops */
@@ -492,8 +495,14 @@ static int nsim_dev_resources_register(struct devlink *devlink)
                                     NSIM_RESOURCE_NEXTHOPS,
                                     DEVLINK_RESOURCE_ID_PARENT_TOP,
                                     &params);
+       if (err) {
+               pr_err("Failed to register NEXTHOPS resource\n");
+               goto err_out;
+       }
+       return 0;
 
-out:
+err_out:
+       devl_resources_unregister(devlink);
        return err;
 }
 
index 8549e0e..b60db8b 100644 (file)
@@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
                                DP83822_EEE_ERROR_CHANGE_INT_EN);
 
                if (!dp83822->fx_enabled)
-                       misr_status |= DP83822_MDI_XOVER_INT_EN |
-                                      DP83822_ANEG_ERR_INT_EN |
+                       misr_status |= DP83822_ANEG_ERR_INT_EN |
                                       DP83822_WOL_PKT_INT_EN;
 
                err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
index 6939563..417527f 100644 (file)
@@ -853,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev)
                else
                        val &= ~DP83867_SGMII_TYPE;
                phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+               /* This is a SW workaround for link instability if RX_CTRL is
+                * not strapped to mode 3 or 4 in HW. This is required for SGMII
+                * in addition to clearing bit 7, handled above.
+                */
+               if (dp83867->rxctrl_strap_quirk)
+                       phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+                                        BIT(8));
        }
 
        val = phy_read(phydev, DP83867_CFG3);
index 75464df..6547b6c 100644 (file)
@@ -1661,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
        if (phy_interrupt_is_valid(phy))
                phy_request_interrupt(phy);
 
+       if (pl->config->mac_managed_pm)
+               phy->mac_managed_pm = true;
+
        return 0;
 }
 
index ff09a8c..2397a90 100644 (file)
@@ -311,7 +311,7 @@ err_unreg_dev:
        return ERR_PTR(err);
 
 err_free_dev:
-       kfree(dev);
+       put_device(&dev->dev);
 
        return ERR_PTR(err);
 }
index f577449..85c06db 100644 (file)
@@ -54,16 +54,19 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
        mutex_lock(&nci_mutex);
        if (state != virtual_ncidev_enabled) {
                mutex_unlock(&nci_mutex);
+               kfree_skb(skb);
                return 0;
        }
 
        if (send_buff) {
                mutex_unlock(&nci_mutex);
+               kfree_skb(skb);
                return -1;
        }
        send_buff = skb_copy(skb, GFP_KERNEL);
        mutex_unlock(&nci_mutex);
        wake_up_interruptible(&wq);
+       consume_skb(skb);
 
        return 0;
 }
index 5fc5ea1..ff8b083 100644 (file)
@@ -1039,6 +1039,8 @@ static void apple_nvme_reset_work(struct work_struct *work)
                                         dma_max_mapping_size(anv->dev) >> 9);
        anv->ctrl.max_segments = NVME_MAX_SEGS;
 
+       dma_set_max_seg_size(anv->dev, 0xffffffff);
+
        /*
         * Enable NVMMU and linear submission queues.
         * While we could keep those disabled and pretend this is slightly
index 059737c..dc42206 100644 (file)
@@ -3262,8 +3262,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
                return ret;
 
        if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+               /*
+                * Do not return errors unless we are in a controller reset;
+                * the controller works perfectly fine without hwmon.
+                */
                ret = nvme_hwmon_init(ctrl);
-               if (ret < 0)
+               if (ret == -EINTR)
                        return ret;
        }
 
@@ -4846,7 +4850,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
        return 0;
 
 out_cleanup_admin_q:
-       blk_mq_destroy_queue(ctrl->fabrics_q);
+       blk_mq_destroy_queue(ctrl->admin_q);
 out_free_tagset:
        blk_mq_free_tag_set(ctrl->admin_tagset);
        return ret;
index 0a586d7..9e6e56c 100644 (file)
@@ -12,7 +12,7 @@
 
 struct nvme_hwmon_data {
        struct nvme_ctrl *ctrl;
-       struct nvme_smart_log log;
+       struct nvme_smart_log *log;
        struct mutex read_lock;
 };
 
@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
        return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
-                          NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+                          NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                           u32 attr, int channel, long *val)
 {
        struct nvme_hwmon_data *data = dev_get_drvdata(dev);
-       struct nvme_smart_log *log = &data->log;
+       struct nvme_smart_log *log = data->log;
        int temp;
        int err;
 
@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
        case hwmon_temp_max:
        case hwmon_temp_min:
                if ((!channel && data->ctrl->wctemp) ||
-                   (channel && data->log.temp_sensor[channel - 1])) {
+                   (channel && data->log->temp_sensor[channel - 1])) {
                        if (data->ctrl->quirks &
                            NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
                                return 0444;
@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
                break;
        case hwmon_temp_input:
        case hwmon_temp_label:
-               if (!channel || data->log.temp_sensor[channel - 1])
+               if (!channel || data->log->temp_sensor[channel - 1])
                        return 0444;
                break;
        default:
@@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
-               return 0;
+               return -ENOMEM;
+
+       data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+       if (!data->log) {
+               err = -ENOMEM;
+               goto err_free_data;
+       }
 
        data->ctrl = ctrl;
        mutex_init(&data->read_lock);
@@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
        err = nvme_hwmon_get_smart_log(data);
        if (err) {
                dev_warn(dev, "Failed to read smart log (error %d)\n", err);
-               kfree(data);
-               return err;
+               goto err_free_log;
        }
 
        hwmon = hwmon_device_register_with_info(dev, "nvme",
@@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
                                                NULL);
        if (IS_ERR(hwmon)) {
                dev_warn(dev, "Failed to instantiate hwmon device\n");
-               kfree(data);
-               return PTR_ERR(hwmon);
+               err = PTR_ERR(hwmon);
+               goto err_free_log;
        }
        ctrl->hwmon_device = hwmon;
        return 0;
+
+err_free_log:
+       kfree(data->log);
+err_free_data:
+       kfree(data);
+       return err;
 }
 
 void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
@@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
 
                hwmon_device_unregister(ctrl->hwmon_device);
                ctrl->hwmon_device = NULL;
+               kfree(data->log);
                kfree(data);
        }
 }
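
The rework above gives nvme_hwmon_init() the usual goto-unwind shape: allocate in order, and on failure free only what has already been allocated, in reverse order, so every exit path is leak-free. A small standalone sketch of that idiom follows; the struct and helper names are made up for the example.

/*
 * Hedged sketch of the goto-unwind error-handling idiom; not nvme code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hwmon_data {
        char *log;              /* separately allocated, like data->log above */
};

static int read_smart_log(struct hwmon_data *data)
{
        /* Stand-in for the SMART-log read; pretend it succeeds. */
        strcpy(data->log, "ok");
        return 0;
}

static int hwmon_init(struct hwmon_data **out)
{
        struct hwmon_data *data;
        int err;

        data = calloc(1, sizeof(*data));
        if (!data)
                return -ENOMEM;

        data->log = calloc(1, 512);
        if (!data->log) {
                err = -ENOMEM;
                goto err_free_data;
        }

        err = read_smart_log(data);
        if (err)
                goto err_free_log;

        *out = data;
        return 0;

err_free_log:
        free(data->log);
err_free_data:
        free(data);
        return err;
}

int main(void)
{
        struct hwmon_data *data = NULL;

        if (hwmon_init(&data) == 0) {
                printf("log: %s\n", data->log);
                free(data->log);
                free(data);
        }
        return 0;
}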
index 0ea7e44..93e2138 100644 (file)
@@ -516,6 +516,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
        /* set to a default value of 512 until the disk is validated */
        blk_queue_logical_block_size(head->disk->queue, 512);
        blk_set_stacking_limits(&head->disk->queue->limits);
+       blk_queue_dma_alignment(head->disk->queue, 3);
 
        /* we need to propagate up the VMC settings */
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
index bcbef6b..31e577b 100644 (file)
@@ -3511,6 +3511,16 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x2646, 0x5018),   /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x2646, 0x5016),   /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x2646, 0x501A),   /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x2646, 0x501B),   /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
index 1eed0fc..9b47dcb 100644 (file)
@@ -387,7 +387,7 @@ static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
 {
        struct scatterlist sg;
 
-       sg_init_marker(&sg, 1);
+       sg_init_table(&sg, 1);
        sg_set_page(&sg, page, len, off);
        ahash_request_set_crypt(hash, &sg, NULL, len);
        crypto_ahash_update(hash);
@@ -1141,6 +1141,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 {
        struct nvme_tcp_request *req;
+       unsigned int noreclaim_flag;
        int ret = 1;
 
        if (!queue->request) {
@@ -1150,12 +1151,13 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
        }
        req = queue->request;
 
+       noreclaim_flag = memalloc_noreclaim_save();
        if (req->state == NVME_TCP_SEND_CMD_PDU) {
                ret = nvme_tcp_try_send_cmd_pdu(req);
                if (ret <= 0)
                        goto done;
                if (!nvme_tcp_has_inline_data(req))
-                       return ret;
+                       goto out;
        }
 
        if (req->state == NVME_TCP_SEND_H2C_PDU) {
@@ -1181,6 +1183,8 @@ done:
                nvme_tcp_fail_request(queue->request);
                nvme_tcp_done_send_req(queue);
        }
+out:
+       memalloc_noreclaim_restore(noreclaim_flag);
        return ret;
 }
 
@@ -1296,6 +1300,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
        struct page *page;
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+       unsigned int noreclaim_flag;
 
        if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
                return;
@@ -1308,7 +1313,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
                __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
                queue->pf_cache.va = NULL;
        }
+
+       noreclaim_flag = memalloc_noreclaim_save();
        sock_release(queue->sock);
+       memalloc_noreclaim_restore(noreclaim_flag);
+
        kfree(queue->pdu);
        mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
index e34a289..9443ee1 100644 (file)
@@ -1290,12 +1290,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
                                               const char *page, size_t cnt)
 {
-       struct nvmet_port *port = to_nvmet_port(item);
        u16 qid_max;
 
-       if (nvmet_is_port_enabled(port, __func__))
-               return -EACCES;
-
        if (sscanf(page, "%hu\n", &qid_max) != 1)
                return -EINVAL;
 
index 1467714..aecb585 100644 (file)
@@ -1176,7 +1176,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
         * reset the keep alive timer when the controller is enabled.
         */
        if (ctrl->kato)
-               mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+               mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
index 24478ae..8e323e9 100644 (file)
@@ -415,6 +415,13 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
  * address (access to which generates correct config transaction) falls in
  * this 4 KiB region.
  */
+static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
+                                          unsigned int where)
+{
+       return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
+              (PCI_FUNC(devfn) << 8) | (where & 0xff);
+}
+
 static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
                                        unsigned int devfn,
                                        int where)
@@ -436,9 +443,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
                unsigned int offset;
                u32 base;
 
-               offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
-                                              PCI_FUNC(devfn), where) &
-                        ~PCI_CONF1_ENABLE;
+               offset = tegra_pcie_conf_offset(bus->number, devfn, where);
 
                /* move 4 KiB window to offset within the FPCI region */
                base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
index 7e73207..9e46d83 100644 (file)
@@ -667,7 +667,7 @@ static u8 jz4755_lcd_24bit_funcs[] = { 1, 1, 1, 1, 0, 0, };
 static const struct group_desc jz4755_groups[] = {
        INGENIC_PIN_GROUP("uart0-data", jz4755_uart0_data, 0),
        INGENIC_PIN_GROUP("uart0-hwflow", jz4755_uart0_hwflow, 0),
-       INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 0),
+       INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 1),
        INGENIC_PIN_GROUP("uart2-data", jz4755_uart2_data, 1),
        INGENIC_PIN_GROUP("ssi-dt-b", jz4755_ssi_dt_b, 0),
        INGENIC_PIN_GROUP("ssi-dt-f", jz4755_ssi_dt_f, 0),
@@ -721,7 +721,7 @@ static const char *jz4755_ssi_groups[] = {
        "ssi-ce1-b", "ssi-ce1-f",
 };
 static const char *jz4755_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
-static const char *jz4755_mmc1_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *jz4755_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
 static const char *jz4755_i2c_groups[] = { "i2c-data", };
 static const char *jz4755_cim_groups[] = { "cim-data", };
 static const char *jz4755_lcd_groups[] = {
index 62ce395..687aaa6 100644 (file)
@@ -1864,19 +1864,28 @@ static void ocelot_irq_unmask_level(struct irq_data *data)
        if (val & bit)
                ack = true;
 
+       /* Try to clear any rising edges */
+       if (!active && ack)
+               regmap_write_bits(info->map, REG(OCELOT_GPIO_INTR, info, gpio),
+                                 bit, bit);
+
        /* Enable the interrupt now */
        gpiochip_enable_irq(chip, gpio);
        regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
                           bit, bit);
 
        /*
-        * In case the interrupt line is still active and the interrupt
-        * controller has not seen any changes in the interrupt line, then it
-        * means that there happen another interrupt while the line was active.
+        * In case the interrupt line is still active, it means that another
+        * interrupt happened while the line was active.
         * So we missed that one, so we need to kick the interrupt again
         * handler.
         */
-       if (active && !ack) {
+       regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+       if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+             (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+               active = true;
+
+       if (active) {
                struct ocelot_irq_work *work;
 
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
index 7d2fbf8..c98f35a 100644 (file)
@@ -412,10 +412,6 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
 
                        break;
                case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
-                       param = PM_PINCTRL_CONFIG_TRI_STATE;
-                       arg = PM_PINCTRL_TRI_STATE_ENABLE;
-                       ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
-                       break;
                case PIN_CONFIG_MODE_LOW_POWER:
                        /*
                         * These cases are mentioned in dts but configurable
@@ -424,11 +420,6 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
                         */
                        ret = 0;
                        break;
-               case PIN_CONFIG_OUTPUT_ENABLE:
-                       param = PM_PINCTRL_CONFIG_TRI_STATE;
-                       arg = PM_PINCTRL_TRI_STATE_DISABLE;
-                       ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
-                       break;
                default:
                        dev_warn(pctldev->dev,
                                 "unsupported configuration parameter '%u'\n",
index a2abfe9..8bf8b21 100644 (file)
@@ -51,6 +51,7 @@
  *                  detection.
  * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
  * @disabled_for_mux: These IRQs were disabled because we muxed away.
+ * @ever_gpio:      This bit is set the first time we mux a pin to gpio_func.
  * @soc:            Reference to soc_data of platform specific data.
  * @regs:           Base addresses for the TLMM tiles.
  * @phys_base:      Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
        DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
+       DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
 
        const struct msm_pinctrl_soc_data *soc;
        void __iomem *regs[MAX_NR_TILES];
@@ -218,6 +220,25 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
        val = msm_readl_ctl(pctrl, g);
 
+       /*
+        * If this is the first time muxing to GPIO and the direction is
+        * output, make sure that we're not going to be glitching the pin
+        * by reading the current state of the pin and setting it as the
+        * output.
+        */
+       if (i == gpio_func && (val & BIT(g->oe_bit)) &&
+           !test_and_set_bit(group, pctrl->ever_gpio)) {
+               u32 io_val = msm_readl_io(pctrl, g);
+
+               if (io_val & BIT(g->in_bit)) {
+                       if (!(io_val & BIT(g->out_bit)))
+                               msm_writel_io(io_val | BIT(g->out_bit), pctrl, g);
+               } else {
+                       if (io_val & BIT(g->out_bit))
+                               msm_writel_io(io_val & ~BIT(g->out_bit), pctrl, g);
+               }
+       }
+
        if (egpio_func && i == egpio_func) {
                if (val & BIT(g->egpio_present))
                        val &= ~BIT(g->egpio_enable);
index f0166ad..9920358 100644 (file)
@@ -199,6 +199,13 @@ static int loongson_hotkey_resume(struct device *dev)
        struct key_entry ke;
        struct backlight_device *bd;
 
+       bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
+       if (bd) {
+               loongson_laptop_backlight_update(bd) ?
+               pr_warn("Loongson_backlight: resume brightness failed") :
+               pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
+       }
+
        /*
         * Only if the firmware supports the SW_LID event model can we handle
         * the event. This accommodates development boards without an EC.
@@ -228,13 +235,6 @@ static int loongson_hotkey_resume(struct device *dev)
                }
        }
 
-       bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
-       if (bd) {
-               loongson_laptop_backlight_update(bd) ?
-               pr_warn("Loongson_backlight: resume brightness failed") :
-               pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
-       }
-
        return 0;
 }
 
@@ -448,6 +448,7 @@ static int __init event_init(struct generic_sub_driver *sub_driver)
        if (ret < 0) {
                pr_err("Failed to setup input device keymap\n");
                input_free_device(generic_inputdev);
+               generic_inputdev = NULL;
 
                return ret;
        }
@@ -502,8 +503,11 @@ static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
        if (ret)
                return -EINVAL;
 
-       if (sub_driver->init)
-               sub_driver->init(sub_driver);
+       if (sub_driver->init) {
+               ret = sub_driver->init(sub_driver);
+               if (ret)
+                       goto err_out;
+       }
 
        if (sub_driver->notify) {
                ret = setup_acpi_notify(sub_driver);
@@ -519,7 +523,7 @@ static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
 
 err_out:
        generic_subdriver_exit(sub_driver);
-       return (ret < 0) ? ret : 0;
+       return ret;
 }
 
 static void generic_subdriver_exit(struct generic_sub_driver *sub_driver)
index ce859b3..96e790e 100644 (file)
@@ -663,6 +663,13 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
        struct rtc_time tm;
        int rc;
 
+       /* we haven't yet read SMU version */
+       if (!pdev->major) {
+               rc = amd_pmc_get_smu_version(pdev);
+               if (rc)
+                       return rc;
+       }
+
        if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
                return 0;
 
index 613c45c..c685a70 100644 (file)
@@ -464,6 +464,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_tablet_mode,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS ROG FLOW X16",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "GV601R"),
+               },
+               .driver_data = &quirk_asus_tablet_mode,
+       },
        {},
 };
 
index a1fe1e0..17ec582 100644 (file)
@@ -1914,6 +1914,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,         &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &tgl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &adl_reg_map),
        {}
 };
 
index 6a823b8..20e5c04 100644 (file)
@@ -263,6 +263,8 @@ enum tpacpi_hkey_event_t {
 #define TPACPI_DBG_BRGHT       0x0020
 #define TPACPI_DBG_MIXER       0x0040
 
+#define FAN_NOT_PRESENT                65535
+
 #define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
 
 
@@ -8876,7 +8878,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
                        /* Try and probe the 2nd fan */
                        tp_features.second_fan = 1; /* needed for get_speed to work */
                        res = fan2_get_speed(&speed);
-                       if (res >= 0) {
+                       if (res >= 0 && speed != FAN_NOT_PRESENT) {
                                /* It responded - so let's assume it's there */
                                tp_features.second_fan = 1;
                                tp_features.second_fan_ctl = 1;
index 610413b..58cc2ba 100644 (file)
@@ -1233,6 +1233,9 @@ static u32 rtc_handler(void *context)
 
 static inline void rtc_wake_setup(struct device *dev)
 {
+       if (acpi_disabled)
+               return;
+
        acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
        /*
         * After the RTC handler is installed, the Fixed_RTC event should
@@ -1286,7 +1289,6 @@ static void cmos_wake_setup(struct device *dev)
 
        use_acpi_alarm_quirks();
 
-       rtc_wake_setup(dev);
        acpi_rtc_info.wake_on = rtc_wake_on;
        acpi_rtc_info.wake_off = rtc_wake_off;
 
@@ -1344,6 +1346,9 @@ static void cmos_check_acpi_rtc_status(struct device *dev,
 {
 }
 
+static void rtc_wake_setup(struct device *dev)
+{
+}
 #endif
 
 #ifdef CONFIG_PNP
@@ -1354,6 +1359,8 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
 {
        int irq, ret;
 
+       cmos_wake_setup(&pnp->dev);
+
        if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
                irq = 0;
 #ifdef CONFIG_X86
@@ -1372,7 +1379,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
        if (ret)
                return ret;
 
-       cmos_wake_setup(&pnp->dev);
+       rtc_wake_setup(&pnp->dev);
 
        return 0;
 }
@@ -1461,6 +1468,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
        int irq, ret;
 
        cmos_of_init(pdev);
+       cmos_wake_setup(&pdev->dev);
 
        if (RTC_IOMAPPED)
                resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1474,7 +1482,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       cmos_wake_setup(&pdev->dev);
+       rtc_wake_setup(&pdev->dev);
 
        return 0;
 }
index 913b6dd..c7db953 100644 (file)
@@ -753,13 +753,9 @@ static int __unset_online(struct device *dev, void *data)
 {
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);
-       struct ccw_device *cdev;
 
-       if (sch->st == SUBCHANNEL_TYPE_IO) {
-               cdev = sch_get_cdev(sch);
-               if (cdev && cdev->online)
-                       idset_sch_del(set, sch->schid);
-       }
+       if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
+               idset_sch_del(set, sch->schid);
 
        return 0;
 }
index 2eddd5f..976a65f 100644 (file)
@@ -52,7 +52,7 @@ struct ap_matrix_dev {
        struct mutex guests_lock; /* serializes access to each KVM guest */
        struct mdev_parent parent;
        struct mdev_type mdev_type;
-       struct mdev_type *mdev_types[];
+       struct mdev_type *mdev_types[1];
 };
 
 extern struct ap_matrix_dev *matrix_dev;
index ac0c7cc..852b025 100644 (file)
@@ -2582,7 +2582,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
  *
  * This function obtains the transmit and receive ids required to send
  * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
- * flags are used to the unsolicted response handler is able to process
+ * flags are used to the unsolicited response handler is able to process
  * the ct command sent on the same port.
  **/
 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
@@ -2874,7 +2874,7 @@ out:
  * @len: Number of data bytes
  *
  * This function allocates and posts a data buffer of sufficient size to receive
- * an unsolicted CT command.
+ * an unsolicited CT command.
  **/
 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
                                          size_t len)
index 75fd2bf..e941a99 100644 (file)
@@ -90,7 +90,7 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
                                get_job_ulpstatus(phba, piocbq));
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "0145 Ignoring unsolicted CT HBQ Size:%d "
+                       "0145 Ignoring unsolicited CT HBQ Size:%d "
                        "status = x%x\n",
                        size, get_job_ulpstatus(phba, piocbq));
 }
index b49c395..b535f1f 100644 (file)
@@ -4812,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        rc = lpfc_vmid_res_alloc(phba, vport);
 
        if (rc)
-               goto out;
+               goto out_put_shost;
 
        /* Initialize all internally managed lists. */
        INIT_LIST_HEAD(&vport->fc_nodes);
@@ -4830,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 
        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
        if (error)
-               goto out_put_shost;
+               goto out_free_vmid;
 
        spin_lock_irq(&phba->port_list_lock);
        list_add_tail(&vport->listentry, &phba->port_list);
        spin_unlock_irq(&phba->port_list_lock);
        return vport;
 
-out_put_shost:
+out_free_vmid:
        kfree(vport->vmid);
        bitmap_free(vport->vmid_priority_range);
+out_put_shost:
        scsi_host_put(shost);
 out:
        return NULL;
index 9be4ba6..d265a2d 100644 (file)
@@ -5874,10 +5874,6 @@ fallback:
 static
 int megasas_get_device_list(struct megasas_instance *instance)
 {
-       memset(instance->pd_list, 0,
-              (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
-       memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
        if (instance->enable_fw_dev_list) {
                if (megasas_host_device_list_query(instance, true))
                        return FAILED;
@@ -7220,7 +7216,7 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
 
                if (!fusion->ioc_init_request) {
                        dev_err(&pdev->dev,
-                               "Failed to allocate PD list buffer\n");
+                               "Failed to allocate ioc init request\n");
                        return -ENOMEM;
                }
 
@@ -7439,7 +7435,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
            (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
                instance->flag_ieee = 1;
 
-       megasas_dbg_lvl = 0;
        instance->flag = 0;
        instance->unload = 1;
        instance->last_time = 0;
@@ -8762,33 +8757,26 @@ static
 int megasas_update_device_list(struct megasas_instance *instance,
                               int event_type)
 {
-       int dcmd_ret = DCMD_SUCCESS;
+       int dcmd_ret;
 
        if (instance->enable_fw_dev_list) {
-               dcmd_ret = megasas_host_device_list_query(instance, false);
-               if (dcmd_ret != DCMD_SUCCESS)
-                       goto out;
+               return megasas_host_device_list_query(instance, false);
        } else {
                if (event_type & SCAN_PD_CHANNEL) {
                        dcmd_ret = megasas_get_pd_list(instance);
-
                        if (dcmd_ret != DCMD_SUCCESS)
-                               goto out;
+                               return dcmd_ret;
                }
 
                if (event_type & SCAN_VD_CHANNEL) {
                        if (!instance->requestorId ||
                        megasas_get_ld_vf_affiliation(instance, 0)) {
-                               dcmd_ret = megasas_ld_list_query(instance,
+                               return megasas_ld_list_query(instance,
                                                MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-                               if (dcmd_ret != DCMD_SUCCESS)
-                                       goto out;
                        }
                }
        }
-
-out:
-       return dcmd_ret;
+       return DCMD_SUCCESS;
 }
 
 /**
@@ -8918,7 +8906,7 @@ megasas_aen_polling(struct work_struct *work)
                        sdev1 = scsi_device_lookup(instance->host,
                                                   MEGASAS_MAX_PD_CHANNELS +
                                                   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
-                                                  (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
+                                                  (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
                                                   0);
                        if (sdev1)
                                megasas_remove_scsi_device(sdev1);
@@ -9016,6 +9004,7 @@ static int __init megasas_init(void)
         */
        pr_info("megasas: %s\n", MEGASAS_VERSION);
 
+       megasas_dbg_lvl = 0;
        support_poll_for_event = 2;
        support_device_change = 1;
        support_nvme_encapsulation = true;
index 8997531..f48740c 100644 (file)
@@ -4,5 +4,6 @@ config SCSI_MPI3MR
        tristate "Broadcom MPI3 Storage Controller Device Driver"
        depends on PCI && SCSI
        select BLK_DEV_BSGLIB
+       select SCSI_SAS_ATTRS
        help
        MPI3 based Storage & RAID Controllers Driver.
index 8b22df8..4e981cc 100644 (file)
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
        u64 coherent_dma_mask, dma_mask;
 
        if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
-           dma_get_required_mask(&pdev->dev) <= 32) {
+           dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
                ioc->dma_mask = 32;
                coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
index 2ff2fac..7a7d63a 100644 (file)
@@ -99,6 +99,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
 static struct scsi_host_template pm8001_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
+       .proc_name              = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
index fa1fcbf..b67ad30 100644 (file)
@@ -951,9 +951,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
                return 0;
 
+       mutex_lock(&vha->hw->optrom_mutex);
        if (ha->dcbx_tlv)
                goto do_read;
-       mutex_lock(&vha->hw->optrom_mutex);
        if (qla2x00_chip_is_down(vha)) {
                mutex_unlock(&vha->hw->optrom_mutex);
                return 0;
@@ -3330,11 +3330,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
        .bsg_timeout = qla24xx_bsg_timeout,
 };
 
+static uint
+qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
+{
+       uint supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+       if (speeds & FDMI_PORT_SPEED_64GB)
+               supported_speeds |= FC_PORTSPEED_64GBIT;
+       if (speeds & FDMI_PORT_SPEED_32GB)
+               supported_speeds |= FC_PORTSPEED_32GBIT;
+       if (speeds & FDMI_PORT_SPEED_16GB)
+               supported_speeds |= FC_PORTSPEED_16GBIT;
+       if (speeds & FDMI_PORT_SPEED_8GB)
+               supported_speeds |= FC_PORTSPEED_8GBIT;
+       if (speeds & FDMI_PORT_SPEED_4GB)
+               supported_speeds |= FC_PORTSPEED_4GBIT;
+       if (speeds & FDMI_PORT_SPEED_2GB)
+               supported_speeds |= FC_PORTSPEED_2GBIT;
+       if (speeds & FDMI_PORT_SPEED_1GB)
+               supported_speeds |= FC_PORTSPEED_1GBIT;
+
+       return supported_speeds;
+}
+
 void
 qla2x00_init_host_attr(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       u32 speeds = FC_PORTSPEED_UNKNOWN;
+       u32 speeds = 0, fdmi_speed = 0;
 
        fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
        fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -3344,7 +3367,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
        fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
        fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
-       speeds = qla25xx_fdmi_port_speed_capability(ha);
+       fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
+       speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
 
        fc_host_supported_speeds(vha->host) = speeds;
 }
index c95177c..cac7c90 100644 (file)
@@ -828,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        }
 
        mutex_lock(&sdev->state_mutex);
+       switch (sdev->sdev_state) {
+       case SDEV_RUNNING:
+       case SDEV_OFFLINE:
+               break;
+       default:
+               mutex_unlock(&sdev->state_mutex);
+               return -EINVAL;
+       }
        if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
                ret = 0;
        } else {
index a334e89..b905713 100644 (file)
@@ -398,7 +398,7 @@ static void aspeed_spi_get_windows(struct aspeed_spi *aspi,
                windows[cs].cs = cs;
                windows[cs].size = data->segment_end(aspi, reg_val) -
                        data->segment_start(aspi, reg_val);
-               windows[cs].offset = cs ? windows[cs - 1].offset + windows[cs - 1].size : 0;
+               windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy;
                dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs,
                         windows[cs].offset, windows[cs].size);
        }
@@ -1163,7 +1163,7 @@ static const struct aspeed_spi_data ast2500_spi_data = {
 static const struct aspeed_spi_data ast2600_fmc_data = {
        .max_cs        = 3,
        .hastype       = false,
-       .mode_bits     = SPI_RX_QUAD | SPI_RX_QUAD,
+       .mode_bits     = SPI_RX_QUAD | SPI_TX_QUAD,
        .we0           = 16,
        .ctl0          = CE0_CTRL_REG,
        .timing        = CE0_TIMING_COMPENSATION_REG,
@@ -1178,7 +1178,7 @@ static const struct aspeed_spi_data ast2600_fmc_data = {
 static const struct aspeed_spi_data ast2600_spi_data = {
        .max_cs        = 2,
        .hastype       = false,
-       .mode_bits     = SPI_RX_QUAD | SPI_RX_QUAD,
+       .mode_bits     = SPI_RX_QUAD | SPI_TX_QUAD,
        .we0           = 16,
        .ctl0          = CE0_CTRL_REG,
        .timing        = CE0_TIMING_COMPENSATION_REG,
index 15b1101..c900c2f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0=or-later
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */
 
 #include <linux/iopoll.h>
index 55f4ee2..605acb1 100644 (file)
 #define ERASE_OPCODE_SHIFT             8
 #define ERASE_OPCODE_MASK              (0xff << ERASE_OPCODE_SHIFT)
 #define ERASE_64K_OPCODE_SHIFT         16
-#define ERASE_64K_OPCODE_MASK          (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_MASK          (0xff << ERASE_64K_OPCODE_SHIFT)
 
 /* Flash descriptor fields */
 #define FLVALSIG_MAGIC                 0x0ff0a55a
index cb075c1..7b64e64 100644 (file)
@@ -151,7 +151,7 @@ mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
        int spr, sppr;
        u8 ctrl1;
 
-       if (status && (irq != NO_IRQ))
+       if (status && irq)
                dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
                        status);
 
index 7d89510..678dc51 100644 (file)
@@ -1057,6 +1057,8 @@ static int spi_qup_probe(struct platform_device *pdev)
        else
                master->num_chipselect = num_cs;
 
+       master->use_gpio_descriptors = true;
+       master->max_native_cs = SPI_NUM_CHIPSELECTS;
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
index c89592b..9049726 100644 (file)
@@ -1157,6 +1157,11 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
                msg->actual_length += xfer->len;
                transfer_phase++;
        }
+       if (!xfer->cs_change) {
+               tegra_qspi_transfer_end(spi);
+               spi_transfer_delay_exec(xfer);
+       }
+       ret = 0;
 
 exit:
        msg->status = ret;
index fb7b406..532e12e 100644 (file)
@@ -17,7 +17,6 @@ atomisp-objs += \
        pci/atomisp_compat_css20.o \
        pci/atomisp_csi2.o \
        pci/atomisp_drvfs.o \
-       pci/atomisp_file.o \
        pci/atomisp_fops.o \
        pci/atomisp_ioctl.o \
        pci/atomisp_subdev.o \
index 8f48b23..fa1de45 100644 (file)
@@ -841,8 +841,6 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
        if (!ov2680_info)
                return -EINVAL;
 
-       mutex_lock(&dev->input_lock);
-
        res = v4l2_find_nearest_size(ov2680_res_preview,
                                     ARRAY_SIZE(ov2680_res_preview), width,
                                     height, fmt->width, fmt->height);
@@ -855,19 +853,22 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
        fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
                sd_state->pads->try_fmt = *fmt;
-               mutex_unlock(&dev->input_lock);
                return 0;
        }
 
        dev_dbg(&client->dev, "%s: %dx%d\n",
                __func__, fmt->width, fmt->height);
 
+       mutex_lock(&dev->input_lock);
+
        /* s_power has not been called yet for std v4l2 clients (camorama) */
        power_up(sd);
        ret = ov2680_write_reg_array(client, dev->res->regs);
-       if (ret)
+       if (ret) {
                dev_err(&client->dev,
                        "ov2680 write resolution register err: %d\n", ret);
+               goto err;
+       }
 
        vts = dev->res->lines_per_frame;
 
@@ -876,8 +877,10 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
                vts = dev->exposure + OV2680_INTEGRATION_TIME_MARGIN;
 
        ret = ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts);
-       if (ret)
+       if (ret) {
                dev_err(&client->dev, "ov2680 write vts err: %d\n", ret);
+               goto err;
+       }
 
        ret = ov2680_get_intg_factor(client, ov2680_info, res);
        if (ret) {
@@ -894,11 +897,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
        if (v_flag)
                ov2680_v_flip(sd, v_flag);
 
-       /*
-        * ret = startup(sd);
-        * if (ret)
-        * dev_err(&client->dev, "ov2680 startup err\n");
-        */
+       dev->res = res;
 err:
        mutex_unlock(&dev->input_lock);
        return ret;
index 385e22f..c5cbae1 100644 (file)
@@ -65,9 +65,6 @@
 #define        check_bo_null_return_void(bo)   \
        check_null_return_void(bo, "NULL hmm buffer object.\n")
 
-#define        HMM_MAX_ORDER           3
-#define        HMM_MIN_ORDER           0
-
 #define        ISP_VM_START    0x0
 #define        ISP_VM_SIZE     (0x7FFFFFFF)    /* 2G address space */
 #define        ISP_PTR_NULL    NULL
@@ -89,8 +86,6 @@ enum hmm_bo_type {
 #define        HMM_BO_VMAPED           0x10
 #define        HMM_BO_VMAPED_CACHED    0x20
 #define        HMM_BO_ACTIVE           0x1000
-#define        HMM_BO_MEM_TYPE_USER     0x1
-#define        HMM_BO_MEM_TYPE_PFN      0x2
 
 struct hmm_bo_device {
        struct isp_mmu          mmu;
@@ -126,7 +121,6 @@ struct hmm_buffer_object {
        enum hmm_bo_type        type;
        int             mmap_count;
        int             status;
-       int             mem_type;
        void            *vmap_addr; /* kernel virtual address by vmap */
 
        struct rb_node  node;
index f96f5ad..3f602b5 100644 (file)
@@ -740,20 +740,6 @@ enum atomisp_frame_status {
        ATOMISP_FRAME_STATUS_FLASH_FAILED,
 };
 
-/* ISP memories, isp2400 */
-enum atomisp_acc_memory {
-       ATOMISP_ACC_MEMORY_PMEM0 = 0,
-       ATOMISP_ACC_MEMORY_DMEM0,
-       /* for backward compatibility */
-       ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0,
-       ATOMISP_ACC_MEMORY_VMEM0,
-       ATOMISP_ACC_MEMORY_VAMEM0,
-       ATOMISP_ACC_MEMORY_VAMEM1,
-       ATOMISP_ACC_MEMORY_VAMEM2,
-       ATOMISP_ACC_MEMORY_HMEM0,
-       ATOMISP_ACC_NR_MEMORY
-};
-
 enum atomisp_ext_isp_id {
        EXT_ISP_CID_ISO = 0,
        EXT_ISP_CID_CAPTURE_HDR,
index 58e0ea5..5463d11 100644 (file)
@@ -26,8 +26,6 @@ struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
 int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd);
 int gmin_get_var_int(struct device *dev, bool is_gmin,
                     const char *var, int def);
-int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
-                     u32 lanes, u32 format, u32 bayer_order, int flag);
 struct camera_sensor_platform_data *
 gmin_camera_platform_data(
     struct v4l2_subdev *subdev,
index 8c65733..0253661 100644 (file)
@@ -141,23 +141,6 @@ struct atomisp_platform_data {
        struct intel_v4l2_subdev_table *subdevs;
 };
 
-/* Describe the capacities of one single sensor. */
-struct atomisp_sensor_caps {
-       /* The number of streams this sensor can output. */
-       int stream_num;
-       bool is_slave;
-};
-
-/* Describe the capacities of sensors connected to one camera port. */
-struct atomisp_camera_caps {
-       /* The number of sensors connected to this camera port. */
-       int sensor_num;
-       /* The capacities of each sensor. */
-       struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT];
-       /* Define whether stream control is required for multiple streams. */
-       bool multi_stream_ctrl;
-};
-
 /*
  *  Sensor of external ISP can send multiple steams with different mipi data
  * type in the same virtual channel. This information needs to come from the
@@ -235,7 +218,6 @@ struct camera_mipi_info {
 };
 
 const struct atomisp_platform_data *atomisp_get_platform_data(void);
-const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
 
 /* API from old platform_camera.h, new CPUID implementation */
 #define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \
index d128b79..d3cf6ed 100644 (file)
@@ -28,3 +28,22 @@ Since getting a picture requires multiple processing steps,
 this means that unlike in fixed pipelines the soft pipelines
 on the ISP can do multiple processing steps in a single pipeline
 element (in a single binary).
+
+###
+
+The sensor drivers use of v4l2_get_subdev_hostdata(), which returns
+a camera_mipi_info struct. This struct is allocated/managed by
+the core atomisp code. The most important parts of the struct
+are filled by the atomisp core itself, like e.g. the port number.
+
+The sensor drivers on a set_fmt call do fill in camera_mipi_info.data
+which is a atomisp_sensor_mode_data struct. This gets filled from
+a function called <sensor_name>_get_intg_factor(). This struct is not
+used by the atomisp code at all. It is returned to userspace by
+a ATOMISP_IOC_G_SENSOR_MODE_DATA and the Android userspace does use this.
+
+Other members of camera_mipi_info which are set by some drivers are:
+-metadata_width, metadata_height, metadata_effective_width, set by
+ the ov5693 driver (and used by the atomisp core)
+-raw_bayer_order, adjusted by the ov2680 driver when flipping since
+ flipping can change the bayer order
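
To make the camera_mipi_info flow described in the notes above concrete, here is a minimal hand-written sketch (not code from the tree) of the pattern: a sensor driver's set_fmt path fetching the core-allocated struct via v4l2_get_subdev_hostdata() and filling in the sensor-owned parts. Only v4l2_get_subdev_hostdata(), struct camera_mipi_info and its data / raw_bayer_order members come from the text above; the include path, the function names my_sensor_set_fmt_hostdata() and my_sensor_fill_intg_factor() (a stand-in for a driver's <sensor_name>_get_intg_factor()) are assumptions made purely for illustration.

/*
 * Illustrative sketch only -- not code from the tree. The include path for
 * atomisp_platform.h is approximate (atomisp keeps its own copy under
 * drivers/staging/media/atomisp/include/linux/).
 */
#include <linux/string.h>
#include <media/v4l2-subdev.h>
#include "../include/linux/atomisp_platform.h"

/* Hypothetical stand-in for a driver's <sensor_name>_get_intg_factor(). */
static int my_sensor_fill_intg_factor(struct v4l2_subdev *sd,
				      struct atomisp_sensor_mode_data *data)
{
	/* A real driver computes these values from the selected sensor mode. */
	memset(data, 0, sizeof(*data));
	return 0;
}

static int my_sensor_set_fmt_hostdata(struct v4l2_subdev *sd)
{
	/* Allocated and partly filled (e.g. the port number) by the atomisp core. */
	struct camera_mipi_info *mipi_info = v4l2_get_subdev_hostdata(sd);

	if (!mipi_info)
		return -EINVAL;

	/*
	 * A driver that supports flipping would also adjust
	 * mipi_info->raw_bayer_order here, as noted above.
	 *
	 * Fill mipi_info->data (struct atomisp_sensor_mode_data); userspace
	 * reads it back through ATOMISP_IOC_G_SENSOR_MODE_DATA.
	 */
	return my_sensor_fill_intg_factor(sd, &mipi_info->data);
}
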
index c932f34..c72d0e3 100644 (file)
@@ -80,6 +80,8 @@ union host {
        } ptr;
 };
 
+static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id);
+
 /*
  * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field.
  * subdev->priv is set in mrst.c
@@ -98,15 +100,6 @@ struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev)
               container_of(dev, struct atomisp_video_pipe, vdev);
 }
 
-/*
- * get struct atomisp_acc_pipe from v4l2 video_device
- */
-struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev)
-{
-       return (struct atomisp_acc_pipe *)
-              container_of(dev, struct atomisp_acc_pipe, vdev);
-}
-
 static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
 {
        struct v4l2_subdev_frame_interval fi = { 0 };
@@ -777,24 +770,6 @@ static struct atomisp_video_pipe *__atomisp_get_pipe(
     enum ia_css_pipe_id css_pipe_id,
     enum ia_css_buffer_type buf_type)
 {
-       struct atomisp_device *isp = asd->isp;
-
-       if (css_pipe_id == IA_CSS_PIPE_ID_COPY &&
-           isp->inputs[asd->input_curr].camera_caps->
-           sensor[asd->sensor_curr].stream_num > 1) {
-               switch (stream_id) {
-               case ATOMISP_INPUT_STREAM_PREVIEW:
-                       return &asd->video_out_preview;
-               case ATOMISP_INPUT_STREAM_POSTVIEW:
-                       return &asd->video_out_vf;
-               case ATOMISP_INPUT_STREAM_VIDEO:
-                       return &asd->video_out_video_capture;
-               case ATOMISP_INPUT_STREAM_CAPTURE:
-               default:
-                       return &asd->video_out_capture;
-               }
-       }
-
        /* video is same in online as in continuouscapture mode */
        if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
                /*
@@ -906,7 +881,8 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
        enum atomisp_metadata_type md_type;
        struct atomisp_device *isp = asd->isp;
        struct v4l2_control ctrl;
-       bool reset_wdt_timer = false;
+
+       lockdep_assert_held(&isp->mutex);
 
        if (
            buf_type != IA_CSS_BUFFER_TYPE_METADATA &&
@@ -1013,9 +989,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
                break;
        case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
        case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
-               if (IS_ISP2401)
-                       reset_wdt_timer = true;
-
                pipe->buffers_in_css--;
                frame = buffer.css_buffer.data.frame;
                if (!frame) {
@@ -1068,9 +1041,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
                break;
        case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
        case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
-               if (IS_ISP2401)
-                       reset_wdt_timer = true;
-
                pipe->buffers_in_css--;
                frame = buffer.css_buffer.data.frame;
                if (!frame) {
@@ -1238,8 +1208,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
                 */
                wake_up(&vb->done);
        }
-       if (IS_ISP2401)
-               atomic_set(&pipe->wdt_count, 0);
 
        /*
         * Requeue should only be done for 3a and dis buffers.
@@ -1256,19 +1224,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
        }
        if (!error && q_buffers)
                atomisp_qbuffers_to_css(asd);
-
-       if (IS_ISP2401) {
-               /* If there are no buffers queued then
-               * delete wdt timer. */
-               if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-                       return;
-               if (!atomisp_buffers_queued_pipe(pipe))
-                       atomisp_wdt_stop_pipe(pipe, false);
-               else if (reset_wdt_timer)
-                       /* SOF irq should not reset wdt timer. */
-                       atomisp_wdt_refresh_pipe(pipe,
-                                               ATOMISP_WDT_KEEP_CURRENT_DELAY);
-       }
 }
 
 void atomisp_delayed_init_work(struct work_struct *work)
@@ -1307,10 +1262,14 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
        bool stream_restart[MAX_STREAM_NUM] = {0};
        bool depth_mode = false;
        int i, ret, depth_cnt = 0;
+       unsigned long flags;
 
-       if (!isp->sw_contex.file_input)
-               atomisp_css_irq_enable(isp,
-                                      IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
+       lockdep_assert_held(&isp->mutex);
+
+       if (!atomisp_streaming_count(isp))
+               return;
+
+       atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
 
        BUG_ON(isp->num_of_streams > MAX_STREAM_NUM);
 
@@ -1331,7 +1290,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
 
                stream_restart[asd->index] = true;
 
+               spin_lock_irqsave(&isp->lock, flags);
                asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+               spin_unlock_irqrestore(&isp->lock, flags);
 
                /* stream off sensor */
                ret = v4l2_subdev_call(
@@ -1346,7 +1307,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
                css_pipe_id = atomisp_get_css_pipe_id(asd);
                atomisp_css_stop(asd, css_pipe_id, true);
 
+               spin_lock_irqsave(&isp->lock, flags);
                asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+               spin_unlock_irqrestore(&isp->lock, flags);
 
                asd->preview_exp_id = 1;
                asd->postview_exp_id = 1;
@@ -1387,25 +1350,23 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
                                                   IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
 
                css_pipe_id = atomisp_get_css_pipe_id(asd);
-               if (atomisp_css_start(asd, css_pipe_id, true))
+               if (atomisp_css_start(asd, css_pipe_id, true)) {
                        dev_warn(isp->dev,
                                 "start SP failed, so do not set streaming to be enable!\n");
-               else
+               } else {
+                       spin_lock_irqsave(&isp->lock, flags);
                        asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+                       spin_unlock_irqrestore(&isp->lock, flags);
+               }
 
                atomisp_csi2_configure(asd);
        }
 
-       if (!isp->sw_contex.file_input) {
-               atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
-                                      atomisp_css_valid_sof(isp));
+       atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+                              atomisp_css_valid_sof(isp));
 
-               if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
-                       dev_dbg(isp->dev, "DFS auto failed while recovering!\n");
-       } else {
-               if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0)
-                       dev_dbg(isp->dev, "DFS max failed while recovering!\n");
-       }
+       if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
+               dev_dbg(isp->dev, "DFS auto failed while recovering!\n");
 
        for (i = 0; i < isp->num_of_streams; i++) {
                struct atomisp_sub_device *asd;
@@ -1454,361 +1415,24 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
        }
 }
 
-void atomisp_wdt_work(struct work_struct *work)
+void atomisp_assert_recovery_work(struct work_struct *work)
 {
        struct atomisp_device *isp = container_of(work, struct atomisp_device,
-                                    wdt_work);
-       int i;
-       unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} };
-       bool css_recover = true;
-
-       rt_mutex_lock(&isp->mutex);
-       if (!atomisp_streaming_count(isp)) {
-               atomic_set(&isp->wdt_work_queued, 0);
-               rt_mutex_unlock(&isp->mutex);
-               return;
-       }
-
-       if (!IS_ISP2401) {
-               dev_err(isp->dev, "timeout %d of %d\n",
-                       atomic_read(&isp->wdt_count) + 1,
-                       ATOMISP_ISP_MAX_TIMEOUT_COUNT);
-       } else {
-               for (i = 0; i < isp->num_of_streams; i++) {
-                       struct atomisp_sub_device *asd = &isp->asd[i];
-
-                       pipe_wdt_cnt[i][0] +=
-                           atomic_read(&asd->video_out_capture.wdt_count);
-                       pipe_wdt_cnt[i][1] +=
-                           atomic_read(&asd->video_out_vf.wdt_count);
-                       pipe_wdt_cnt[i][2] +=
-                           atomic_read(&asd->video_out_preview.wdt_count);
-                       pipe_wdt_cnt[i][3] +=
-                           atomic_read(&asd->video_out_video_capture.wdt_count);
-                       css_recover =
-                           (pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
-                           pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
-                           pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
-                           pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT)
-                           ? true : false;
-                       dev_err(isp->dev,
-                               "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n",
-                               asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1],
-                               pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3],
-                               ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover);
-               }
-       }
-
-       if (css_recover) {
-               ia_css_debug_dump_sp_sw_debug_info();
-               ia_css_debug_dump_debug_info(__func__);
-               for (i = 0; i < isp->num_of_streams; i++) {
-                       struct atomisp_sub_device *asd = &isp->asd[i];
-
-                       if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-                               continue;
-                       dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n",
-                               __func__,
-                               asd->video_out_capture.vdev.name,
-                               asd->video_out_capture.
-                               buffers_in_css);
-                       dev_err(isp->dev,
-                               "%s, vdev %s buffers in css: %d\n",
-                               __func__,
-                               asd->video_out_vf.vdev.name,
-                               asd->video_out_vf.
-                               buffers_in_css);
-                       dev_err(isp->dev,
-                               "%s, vdev %s buffers in css: %d\n",
-                               __func__,
-                               asd->video_out_preview.vdev.name,
-                               asd->video_out_preview.
-                               buffers_in_css);
-                       dev_err(isp->dev,
-                               "%s, vdev %s buffers in css: %d\n",
-                               __func__,
-                               asd->video_out_video_capture.vdev.name,
-                               asd->video_out_video_capture.
-                               buffers_in_css);
-                       dev_err(isp->dev,
-                               "%s, s3a buffers in css preview pipe:%d\n",
-                               __func__,
-                               asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_PREVIEW]);
-                       dev_err(isp->dev,
-                               "%s, s3a buffers in css capture pipe:%d\n",
-                               __func__,
-                               asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_CAPTURE]);
-                       dev_err(isp->dev,
-                               "%s, s3a buffers in css video pipe:%d\n",
-                               __func__,
-                               asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_VIDEO]);
-                       dev_err(isp->dev,
-                               "%s, dis buffers in css: %d\n",
-                               __func__, asd->dis_bufs_in_css);
-                       dev_err(isp->dev,
-                               "%s, metadata buffers in css preview pipe:%d\n",
-                               __func__,
-                               asd->metadata_bufs_in_css
-                               [ATOMISP_INPUT_STREAM_GENERAL]
-                               [IA_CSS_PIPE_ID_PREVIEW]);
-                       dev_err(isp->dev,
-                               "%s, metadata buffers in css capture pipe:%d\n",
-                               __func__,
-                               asd->metadata_bufs_in_css
-                               [ATOMISP_INPUT_STREAM_GENERAL]
-                               [IA_CSS_PIPE_ID_CAPTURE]);
-                       dev_err(isp->dev,
-                               "%s, metadata buffers in css video pipe:%d\n",
-                               __func__,
-                               asd->metadata_bufs_in_css
-                               [ATOMISP_INPUT_STREAM_GENERAL]
-                               [IA_CSS_PIPE_ID_VIDEO]);
-                       if (asd->enable_raw_buffer_lock->val) {
-                               unsigned int j;
-
-                               dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n",
-                                       __func__, asd->raw_buffer_locked_count);
-                               for (j = 0; j <= ATOMISP_MAX_EXP_ID / 32; j++)
-                                       dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n",
-                                               __func__, j,
-                                               asd->raw_buffer_bitmap[j]);
-                       }
-               }
-
-               /*sh_css_dump_sp_state();*/
-               /*sh_css_dump_isp_state();*/
-       } else {
-               for (i = 0; i < isp->num_of_streams; i++) {
-                       struct atomisp_sub_device *asd = &isp->asd[i];
-
-                       if (asd->streaming ==
-                           ATOMISP_DEVICE_STREAMING_ENABLED) {
-                               atomisp_clear_css_buffer_counters(asd);
-                               atomisp_flush_bufs_and_wakeup(asd);
-                               complete(&asd->init_done);
-                       }
-                       if (IS_ISP2401)
-                               atomisp_wdt_stop(asd, false);
-               }
-
-               if (!IS_ISP2401) {
-                       atomic_set(&isp->wdt_count, 0);
-               } else {
-                       isp->isp_fatal_error = true;
-                       atomic_set(&isp->wdt_work_queued, 0);
-
-                       rt_mutex_unlock(&isp->mutex);
-                       return;
-               }
-       }
+                                                 assert_recovery_work);
 
+       mutex_lock(&isp->mutex);
        __atomisp_css_recover(isp, true);
-       if (IS_ISP2401) {
-               for (i = 0; i < isp->num_of_streams; i++) {
-                       struct atomisp_sub_device *asd = &isp->asd[i];
-
-                       if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-                               continue;
-
-                       atomisp_wdt_refresh(asd,
-                                           isp->sw_contex.file_input ?
-                                           ATOMISP_ISP_FILE_TIMEOUT_DURATION :
-                                           ATOMISP_ISP_TIMEOUT_DURATION);
-               }
-       }
-
-       dev_err(isp->dev, "timeout recovery handling done\n");
-       atomic_set(&isp->wdt_work_queued, 0);
-
-       rt_mutex_unlock(&isp->mutex);
+       mutex_unlock(&isp->mutex);
 }
 
 void atomisp_css_flush(struct atomisp_device *isp)
 {
-       int i;
-
-       if (!atomisp_streaming_count(isp))
-               return;
-
-       /* Disable wdt */
-       for (i = 0; i < isp->num_of_streams; i++) {
-               struct atomisp_sub_device *asd = &isp->asd[i];
-
-               atomisp_wdt_stop(asd, true);
-       }
-
        /* Start recover */
        __atomisp_css_recover(isp, false);
-       /* Restore wdt */
-       for (i = 0; i < isp->num_of_streams; i++) {
-               struct atomisp_sub_device *asd = &isp->asd[i];
-
-               if (asd->streaming !=
-                   ATOMISP_DEVICE_STREAMING_ENABLED)
-                       continue;
 
-               atomisp_wdt_refresh(asd,
-                                   isp->sw_contex.file_input ?
-                                   ATOMISP_ISP_FILE_TIMEOUT_DURATION :
-                                   ATOMISP_ISP_TIMEOUT_DURATION);
-       }
        dev_dbg(isp->dev, "atomisp css flush done\n");
 }
 
-void atomisp_wdt(struct timer_list *t)
-{
-       struct atomisp_sub_device *asd;
-       struct atomisp_device *isp;
-
-       if (!IS_ISP2401) {
-               asd = from_timer(asd, t, wdt);
-               isp = asd->isp;
-       } else {
-               struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);
-
-               asd = pipe->asd;
-               isp = asd->isp;
-
-               atomic_inc(&pipe->wdt_count);
-               dev_warn(isp->dev,
-                       "[WARNING]asd %d pipe %s ISP timeout %d!\n",
-                       asd->index, pipe->vdev.name,
-                       atomic_read(&pipe->wdt_count));
-       }
-
-       if (atomic_read(&isp->wdt_work_queued)) {
-               dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n");
-               return;
-       }
-       atomic_set(&isp->wdt_work_queued, 1);
-       queue_work(isp->wdt_work_queue, &isp->wdt_work);
-}
-
-/* ISP2400 */
-void atomisp_wdt_start(struct atomisp_sub_device *asd)
-{
-       atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION);
-}
-
-/* ISP2401 */
-void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
-                             unsigned int delay)
-{
-       unsigned long next;
-
-       if (!pipe->asd) {
-               dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, pipe->vdev.name);
-               return;
-       }
-
-       if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
-               pipe->wdt_duration = delay;
-
-       next = jiffies + pipe->wdt_duration;
-
-       /* Override next if it has been pushed beyon the "next" time */
-       if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next))
-               next = pipe->wdt_expires;
-
-       pipe->wdt_expires = next;
-
-       if (atomisp_is_wdt_running(pipe))
-               dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n",
-                       ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
-       else
-               dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n",
-                       ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
-
-       mod_timer(&pipe->wdt, next);
-}
-
-void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay)
-{
-       if (!IS_ISP2401) {
-               unsigned long next;
-
-               if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
-                       asd->wdt_duration = delay;
-
-               next = jiffies + asd->wdt_duration;
-
-               /* Override next if it has been pushed beyon the "next" time */
-               if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next))
-                       next = asd->wdt_expires;
-
-               asd->wdt_expires = next;
-
-               if (atomisp_is_wdt_running(asd))
-                       dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n",
-                               ((int)(next - jiffies) * 1000 / HZ));
-               else
-                       dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n",
-                               ((int)(next - jiffies) * 1000 / HZ));
-
-               mod_timer(&asd->wdt, next);
-               atomic_set(&asd->isp->wdt_count, 0);
-       } else {
-               dev_dbg(asd->isp->dev, "WDT refresh all:\n");
-               if (atomisp_is_wdt_running(&asd->video_out_capture))
-                       atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay);
-               if (atomisp_is_wdt_running(&asd->video_out_preview))
-                       atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay);
-               if (atomisp_is_wdt_running(&asd->video_out_vf))
-                       atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay);
-               if (atomisp_is_wdt_running(&asd->video_out_video_capture))
-                       atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay);
-       }
-}
-
-/* ISP2401 */
-void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync)
-{
-       if (!pipe->asd) {
-               dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, pipe->vdev.name);
-               return;
-       }
-
-       if (!atomisp_is_wdt_running(pipe))
-               return;
-
-       dev_dbg(pipe->asd->isp->dev,
-               "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name);
-
-       if (sync) {
-               del_timer_sync(&pipe->wdt);
-               cancel_work_sync(&pipe->asd->isp->wdt_work);
-       } else {
-               del_timer(&pipe->wdt);
-       }
-}
-
-/* ISP 2401 */
-void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe)
-{
-       atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION);
-}
-
-void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync)
-{
-       dev_dbg(asd->isp->dev, "WDT stop:\n");
-
-       if (!IS_ISP2401) {
-               if (sync) {
-                       del_timer_sync(&asd->wdt);
-                       cancel_work_sync(&asd->isp->wdt_work);
-               } else {
-                       del_timer(&asd->wdt);
-               }
-       } else {
-               atomisp_wdt_stop_pipe(&asd->video_out_capture, sync);
-               atomisp_wdt_stop_pipe(&asd->video_out_preview, sync);
-               atomisp_wdt_stop_pipe(&asd->video_out_vf, sync);
-               atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync);
-       }
-}
-
 void atomisp_setup_flash(struct atomisp_sub_device *asd)
 {
        struct atomisp_device *isp = asd->isp;
@@ -1884,7 +1508,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
         * For CSS2.0: we change the way to not dequeue all the event at one
         * time, instead, dequue one and process one, then another
         */
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
        if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done))
                goto out;
 
@@ -1895,15 +1519,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
                atomisp_setup_flash(asd);
        }
 out:
-       rt_mutex_unlock(&isp->mutex);
-       for (i = 0; i < isp->num_of_streams; i++) {
-               asd = &isp->asd[i];
-               if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED
-                   && css_pipe_done[asd->index]
-                   && isp->sw_contex.file_input)
-                       v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
-                                        video, s_stream, 1);
-       }
+       mutex_unlock(&isp->mutex);
        dev_dbg(isp->dev, "<%s\n", __func__);
 
        return IRQ_HANDLED;
@@ -2322,7 +1938,6 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
 {
        struct atomisp_device *isp = asd->isp;
        int err;
-       u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
 
        if (atomisp_css_get_grid_info(asd, pipe_id, source_pad))
                return;
@@ -2331,7 +1946,7 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
           the grid size. */
        atomisp_css_free_stat_buffers(asd);
 
-       err = atomisp_alloc_css_stat_bufs(asd, stream_id);
+       err = atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL);
        if (err) {
                dev_err(isp->dev, "stat_buf allocate error\n");
                goto err;
@@ -4077,6 +3692,8 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
        unsigned long irqflags;
        bool need_to_enqueue_buffer = false;
 
+       lockdep_assert_held(&asd->isp->mutex);
+
        if (!asd) {
                dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
                        __func__, pipe->vdev.name);
@@ -4143,19 +3760,6 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
                return;
 
        atomisp_qbuffers_to_css(asd);
-
-       if (!IS_ISP2401) {
-               if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
-                       atomisp_wdt_start(asd);
-       } else {
-               if (atomisp_buffers_queued_pipe(pipe)) {
-                       if (!atomisp_is_wdt_running(pipe))
-                               atomisp_wdt_start_pipe(pipe);
-                       else
-                               atomisp_wdt_refresh_pipe(pipe,
-                                                       ATOMISP_WDT_KEEP_CURRENT_DELAY);
-               }
-       }
 }
 
 /*
@@ -4170,6 +3774,8 @@ int atomisp_set_parameters(struct video_device *vdev,
        struct atomisp_css_params *css_param = &asd->params.css_param;
        int ret;
 
+       lockdep_assert_held(&asd->isp->mutex);
+
        if (!asd) {
                dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
                        __func__, vdev->name);
@@ -4824,8 +4430,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
        const struct atomisp_format_bridge *fmt;
        struct atomisp_input_stream_info *stream_info =
            (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved;
-       u16 stream_index;
-       int source_pad = atomisp_subdev_source_pad(vdev);
        int ret;
 
        if (!asd) {
@@ -4837,7 +4441,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
        if (!isp->inputs[asd->input_curr].camera)
                return -EINVAL;
 
-       stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
        fmt = atomisp_get_format_bridge(f->pixelformat);
        if (!fmt) {
                dev_err(isp->dev, "unsupported pixelformat!\n");
@@ -4851,7 +4454,7 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
        snr_mbus_fmt->width = f->width;
        snr_mbus_fmt->height = f->height;
 
-       __atomisp_init_stream_info(stream_index, stream_info);
+       __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);
 
        dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
                snr_mbus_fmt->width, snr_mbus_fmt->height);
@@ -4886,8 +4489,8 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
                return 0;
        }
 
-       if (snr_mbus_fmt->width < f->width
-           && snr_mbus_fmt->height < f->height) {
+       if (!res_overflow || (snr_mbus_fmt->width < f->width &&
+                             snr_mbus_fmt->height < f->height)) {
                f->width = snr_mbus_fmt->width;
                f->height = snr_mbus_fmt->height;
                /* Set the flag when resolution requested is
@@ -4906,41 +4509,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
        return 0;
 }
 
-static int
-atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f)
-{
-       u32 width = f->fmt.pix.width;
-       u32 height = f->fmt.pix.height;
-       u32 pixelformat = f->fmt.pix.pixelformat;
-       enum v4l2_field field = f->fmt.pix.field;
-       u32 depth;
-
-       if (!atomisp_get_format_bridge(pixelformat)) {
-               dev_err(isp->dev, "Wrong output pixelformat\n");
-               return -EINVAL;
-       }
-
-       depth = atomisp_get_pixel_depth(pixelformat);
-
-       if (field == V4L2_FIELD_ANY) {
-               field = V4L2_FIELD_NONE;
-       } else if (field != V4L2_FIELD_NONE) {
-               dev_err(isp->dev, "Wrong output field\n");
-               return -EINVAL;
-       }
-
-       f->fmt.pix.field = field;
-       f->fmt.pix.width = clamp_t(u32,
-                                  rounddown(width, (u32)ATOM_ISP_STEP_WIDTH),
-                                  ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH);
-       f->fmt.pix.height = clamp_t(u32, rounddown(height,
-                                   (u32)ATOM_ISP_STEP_HEIGHT),
-                                   ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT);
-       f->fmt.pix.bytesperline = (width * depth) >> 3;
-
-       return 0;
-}
-
 enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
                                  enum atomisp_camera_port port)
 {
@@ -5171,7 +4739,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
        int (*configure_pp_input)(struct atomisp_sub_device *asd,
                                  unsigned int width, unsigned int height) =
                                      configure_pp_input_nop;
-       u16 stream_index;
        const struct atomisp_in_fmt_conv *fc;
        int ret, i;
 
@@ -5180,7 +4747,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
                        __func__, vdev->name);
                return -EINVAL;
        }
-       stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
 
        v4l2_fh_init(&fh.vfh, vdev);
 
@@ -5200,7 +4766,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
                        dev_err(isp->dev, "mipi_info is NULL\n");
                        return -EINVAL;
                }
-               if (atomisp_set_sensor_mipi_to_isp(asd, stream_index,
+               if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL,
                                                   mipi_info))
                        return -EINVAL;
                fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
@@ -5284,7 +4850,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
        /* ISP2401 new input system need to use copy pipe */
        if (asd->copy_mode) {
                pipe_id = IA_CSS_PIPE_ID_COPY;
-               atomisp_css_capture_enable_online(asd, stream_index, false);
+               atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, false);
        } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
                /* video same in continuouscapture and online modes */
                configure_output = atomisp_css_video_configure_output;
@@ -5316,7 +4882,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
                                pipe_id = IA_CSS_PIPE_ID_CAPTURE;
 
                                atomisp_update_capture_mode(asd);
-                               atomisp_css_capture_enable_online(asd, stream_index, false);
+                               atomisp_css_capture_enable_online(asd,
+                                                                 ATOMISP_INPUT_STREAM_GENERAL,
+                                                                 false);
                        }
                }
        } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
@@ -5341,7 +4909,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
 
                if (!asd->continuous_mode->val)
                        /* in case of ANR, force capture pipe to offline mode */
-                       atomisp_css_capture_enable_online(asd, stream_index,
+                       atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
                                                          asd->params.low_light ?
                                                          false : asd->params.online_process);
 
@@ -5372,7 +4940,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
                pipe_id = IA_CSS_PIPE_ID_YUVPP;
 
        if (asd->copy_mode)
-               ret = atomisp_css_copy_configure_output(asd, stream_index,
+               ret = atomisp_css_copy_configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL,
                                                        pix->width, pix->height,
                                                        format->planar ? pix->bytesperline :
                                                        pix->bytesperline * 8 / format->depth,
@@ -5396,8 +4964,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
                return -EINVAL;
        }
        if (asd->copy_mode)
-               ret = atomisp_css_copy_get_output_frame_info(asd, stream_index,
-                       output_info);
+               ret = atomisp_css_copy_get_output_frame_info(asd,
+                                                            ATOMISP_INPUT_STREAM_GENERAL,
+                                                            output_info);
        else
                ret = get_frame_info(asd, output_info);
        if (ret) {
@@ -5412,8 +4981,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
        ia_css_frame_free(asd->raw_output_frame);
        asd->raw_output_frame = NULL;
 
-       if (!asd->continuous_mode->val &&
-           !asd->params.online_process && !isp->sw_contex.file_input &&
+       if (!asd->continuous_mode->val && !asd->params.online_process &&
            ia_css_frame_allocate_from_info(&asd->raw_output_frame,
                    raw_output_info))
                return -ENOMEM;
@@ -5462,12 +5030,7 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
        src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
                                      V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
 
-       if ((sink->code == src->code &&
-            sink->width == f->width &&
-            sink->height == f->height) ||
-           ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
-            (asd->isp->inputs[asd->input_curr].camera_caps->
-             sensor[asd->sensor_curr].stream_num > 1)))
+       if (sink->code == src->code && sink->width == f->width && sink->height == f->height)
                asd->copy_mode = true;
        else
                asd->copy_mode = false;
@@ -5495,7 +5058,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
        struct atomisp_device *isp;
        struct atomisp_input_stream_info *stream_info =
            (struct atomisp_input_stream_info *)ffmt->reserved;
-       u16 stream_index = ATOMISP_INPUT_STREAM_GENERAL;
        int source_pad = atomisp_subdev_source_pad(vdev);
        struct v4l2_subdev_fh fh;
        int ret;
@@ -5510,8 +5072,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
 
        v4l2_fh_init(&fh.vfh, vdev);
 
-       stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
-
        format = atomisp_get_format_bridge(pixelformat);
        if (!format)
                return -EINVAL;
@@ -5524,7 +5084,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
                ffmt->width, ffmt->height, padding_w, padding_h,
                dvs_env_w, dvs_env_h);
 
-       __atomisp_init_stream_info(stream_index, stream_info);
+       __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);
 
        req_ffmt = ffmt;
 
@@ -5556,7 +5116,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
        if (ret)
                return ret;
 
-       __atomisp_update_stream_env(asd, stream_index, stream_info);
+       __atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info);
 
        dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
                ffmt->width, ffmt->height);
@@ -5580,8 +5140,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
        return css_input_resolution_changed(asd, ffmt);
 }
 
-int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
+int atomisp_set_fmt(struct file *file, void *unused, struct v4l2_format *f)
 {
+       struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
        struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
        struct atomisp_sub_device *asd = pipe->asd;
@@ -5604,20 +5165,13 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
        struct v4l2_subdev_fh fh;
        int ret;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
+       ret = atomisp_pipe_check(pipe, true);
+       if (ret)
+               return ret;
 
        if (source_pad >= ATOMISP_SUBDEV_PADS_NUM)
                return -EINVAL;
 
-       if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
-               dev_warn(isp->dev, "ISP does not support set format while at streaming!\n");
-               return -EBUSY;
-       }
-
        dev_dbg(isp->dev,
                "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n",
                f->fmt.pix.width, f->fmt.pix.height, source_pad,
@@ -5699,58 +5253,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
                        f->fmt.pix.height = r.height;
                }
 
-               if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
-                   (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
-                   (asd->isp->inputs[asd->input_curr].camera_caps->
-                    sensor[asd->sensor_curr].stream_num > 1)) {
-                       /* For M10MO outputing YUV preview images. */
-                       u16 video_index =
-                           atomisp_source_pad_to_stream_id(asd,
-                                                           ATOMISP_SUBDEV_PAD_SOURCE_VIDEO);
-
-                       ret = atomisp_css_copy_get_output_frame_info(asd,
-                               video_index, &output_info);
-                       if (ret) {
-                               dev_err(isp->dev,
-                                       "copy_get_output_frame_info ret %i", ret);
-                               return -EINVAL;
-                       }
-                       if (!asd->yuvpp_mode) {
-                               /*
-                                * If viewfinder was configured into copy_mode,
-                                * we switch to using yuvpp pipe instead.
-                                */
-                               asd->yuvpp_mode = true;
-                               ret = atomisp_css_copy_configure_output(
-                                         asd, video_index, 0, 0, 0, 0);
-                               if (ret) {
-                                       dev_err(isp->dev,
-                                               "failed to disable copy pipe");
-                                       return -EINVAL;
-                               }
-                               ret = atomisp_css_yuvpp_configure_output(
-                                         asd, video_index,
-                                         output_info.res.width,
-                                         output_info.res.height,
-                                         output_info.padded_width,
-                                         output_info.format);
-                               if (ret) {
-                                       dev_err(isp->dev,
-                                               "failed to set up yuvpp pipe\n");
-                                       return -EINVAL;
-                               }
-                               atomisp_css_video_enable_online(asd, false);
-                               atomisp_css_preview_enable_online(asd,
-                                                                 ATOMISP_INPUT_STREAM_GENERAL, false);
-                       }
-                       atomisp_css_yuvpp_configure_viewfinder(asd, video_index,
-                                                              f->fmt.pix.width, f->fmt.pix.height,
-                                                              format_bridge->planar ? f->fmt.pix.bytesperline
-                                                              : f->fmt.pix.bytesperline * 8
-                                                              / format_bridge->depth, format_bridge->sh_fmt);
-                       atomisp_css_yuvpp_get_viewfinder_frame_info(
-                           asd, video_index, &output_info);
-               } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+               if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
                        atomisp_css_video_configure_viewfinder(asd,
                                                               f->fmt.pix.width, f->fmt.pix.height,
                                                               format_bridge->planar ? f->fmt.pix.bytesperline
@@ -6078,55 +5581,6 @@ done:
        return 0;
 }
 
-int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
-{
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-       struct atomisp_sub_device *asd = pipe->asd;
-       struct v4l2_mbus_framefmt ffmt = {0};
-       const struct atomisp_format_bridge *format_bridge;
-       struct v4l2_subdev_fh fh;
-       int ret;
-
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
-       v4l2_fh_init(&fh.vfh, vdev);
-
-       dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n",
-               f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
-       ret = atomisp_try_fmt_file(isp, f);
-       if (ret) {
-               dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret);
-               return ret;
-       }
-
-       format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
-       if (!format_bridge) {
-               dev_dbg(isp->dev, "atomisp_get_format_bridge err! fmt:0x%x\n",
-                       f->fmt.pix.pixelformat);
-               return -EINVAL;
-       }
-
-       pipe->pix = f->fmt.pix;
-       atomisp_css_input_set_mode(asd, IA_CSS_INPUT_MODE_FIFO);
-       atomisp_css_input_configure_port(asd,
-                                        __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4,
-                                        0, 0, 0, 0);
-       ffmt.width = f->fmt.pix.width;
-       ffmt.height = f->fmt.pix.height;
-       ffmt.code = format_bridge->mbus_code;
-
-       atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
-                               V4L2_SUBDEV_FORMAT_ACTIVE,
-                               ATOMISP_SUBDEV_PAD_SINK, &ffmt);
-
-       return 0;
-}
-
 int atomisp_set_shading_table(struct atomisp_sub_device *asd,
                              struct atomisp_shading_table *user_shading_table)
 {
@@ -6275,6 +5729,8 @@ int atomisp_offline_capture_configure(struct atomisp_sub_device *asd,
 {
        struct v4l2_ctrl *c;
 
+       lockdep_assert_held(&asd->isp->mutex);
+
        /*
        * In case of M10MO ZSL capture case, we need to issue a separate
        * capture request to M10MO which will output captured jpeg image
@@ -6379,36 +5835,6 @@ int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
        return 0;
 }
 
-int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
-                                   uint16_t source_pad)
-{
-       int stream_id;
-       struct atomisp_device *isp = asd->isp;
-
-       if (isp->inputs[asd->input_curr].camera_caps->
-           sensor[asd->sensor_curr].stream_num == 1)
-               return ATOMISP_INPUT_STREAM_GENERAL;
-
-       switch (source_pad) {
-       case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
-               stream_id = ATOMISP_INPUT_STREAM_CAPTURE;
-               break;
-       case ATOMISP_SUBDEV_PAD_SOURCE_VF:
-               stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
-               break;
-       case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
-               stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
-               break;
-       case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
-               stream_id = ATOMISP_INPUT_STREAM_VIDEO;
-               break;
-       default:
-               stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-       }
-
-       return stream_id;
-}
-
 bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe)
 {
        struct atomisp_sub_device *asd = pipe->asd;
@@ -6459,7 +5885,7 @@ void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd)
        spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
 }
 
-int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
 {
        int *bitmap, bit;
        unsigned long flags;
@@ -6549,6 +5975,8 @@ int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id)
        int value = *exp_id;
        int ret;
 
+       lockdep_assert_held(&isp->mutex);
+
        ret = __is_raw_buffer_locked(asd, value);
        if (ret) {
                dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
@@ -6570,6 +5998,8 @@ int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id)
        int value = *exp_id;
        int ret;
 
+       lockdep_assert_held(&isp->mutex);
+
        ret = __clear_raw_buffer_bitmap(asd, value);
        if (ret) {
                dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
@@ -6605,6 +6035,8 @@ int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
        if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
                return -EINVAL;
 
+       lockdep_assert_held(&asd->isp->mutex);
+
        dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n",
                __func__, *event);
 
@@ -6675,19 +6107,6 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev,
        struct ia_css_pipe_info p_info;
        int ret;
 
-       if (!asd) {
-               dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
-       if (asd->isp->inputs[asd->input_curr].camera_caps->
-           sensor[asd->sensor_curr].stream_num > 1) {
-               /* External ISP */
-               *invalid_frame_num = 0;
-               return 0;
-       }
-
        pipe_id = atomisp_get_pipe_id(pipe);
        if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) {
                dev_warn(asd->isp->dev,
index ebc7294..c9f92f1 100644
@@ -54,7 +54,6 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
                  unsigned int size);
 struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd);
 struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev);
-struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev);
 int atomisp_reset(struct atomisp_device *isp);
 void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd);
 void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd);
@@ -66,8 +65,7 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
 /* Interrupt functions */
 void atomisp_msi_irq_init(struct atomisp_device *isp);
 void atomisp_msi_irq_uninit(struct atomisp_device *isp);
-void atomisp_wdt_work(struct work_struct *work);
-void atomisp_wdt(struct timer_list *t);
+void atomisp_assert_recovery_work(struct work_struct *work);
 void atomisp_setup_flash(struct atomisp_sub_device *asd);
 irqreturn_t atomisp_isr(int irq, void *dev);
 irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
@@ -268,8 +266,7 @@ int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
 int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
                    bool *res_overflow);
 
-int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f);
-int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f);
+int atomisp_set_fmt(struct file *file, void *fh, struct v4l2_format *f);
 
 int atomisp_set_shading_table(struct atomisp_sub_device *asd,
                              struct atomisp_shading_table *shading_table);
@@ -300,8 +297,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
                      bool q_buffers, enum atomisp_input_stream_id stream_id);
 
 void atomisp_css_flush(struct atomisp_device *isp);
-int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
-                                   uint16_t source_pad);
 
 /* Events. Only one event has to be exported for now. */
 void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
@@ -324,8 +319,6 @@ void atomisp_flush_params_queue(struct atomisp_video_pipe *asd);
 int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id);
 int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id);
 
-/* Function to update Raw Buffer bitmap */
-int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id);
 void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd);
 
 /* Function to enable/disable zoom for capture pipe */
index 3393ae6..a6d85d0 100644
@@ -129,10 +129,6 @@ int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd);
 
 void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd);
 
-void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
-                                   struct atomisp_css_buffer *isp_css_buffer,
-                                   struct ia_css_isp_dvs_statistics_map *dvs_map);
-
 void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
                                      struct atomisp_css_event *current_event);
 
@@ -434,17 +430,11 @@ void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
 
 void atomisp_css_morph_table_free(struct ia_css_morph_table *table);
 
-void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
-       unsigned int overlap);
-
 int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
                             struct atomisp_dis_statistics *stats);
 
 int atomisp_css_update_stream(struct atomisp_sub_device *asd);
 
-struct atomisp_acc_fw;
-int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw);
-
 int atomisp_css_isr_thread(struct atomisp_device *isp,
                           bool *frame_done_found,
                           bool *css_pipe_done);
index 5aa108a..fdc0554 100644
@@ -1427,7 +1427,6 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
        struct ia_css_pipe_info p_info;
        struct ia_css_grid_info old_info;
        struct atomisp_device *isp = asd->isp;
-       int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
        int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
                       stream_config.metadata_config.resolution.width;
 
@@ -1435,7 +1434,7 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
        memset(&old_info, 0, sizeof(struct ia_css_grid_info));
 
        if (ia_css_pipe_get_info(
-               asd->stream_env[stream_index].pipes[pipe_id],
+               asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id],
                &p_info) != 0) {
                dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
                return -EINVAL;
@@ -1574,20 +1573,6 @@ void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd)
        }
 }
 
-void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
-                                   struct atomisp_css_buffer *isp_css_buffer,
-                                   struct ia_css_isp_dvs_statistics_map *dvs_map)
-{
-       if (asd->params.dvs_stat) {
-               if (dvs_map)
-                       ia_css_translate_dvs2_statistics(
-                           asd->params.dvs_stat, dvs_map);
-               else
-                       ia_css_get_dvs2_statistics(asd->params.dvs_stat,
-                                                  isp_css_buffer->css_buffer.data.stats_dvs);
-       }
-}
-
 void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
                                      struct atomisp_css_event *current_event)
 {
@@ -2694,11 +2679,11 @@ int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
        struct atomisp_device *isp = asd->isp;
 
        if (ATOMISP_SOC_CAMERA(asd)) {
-               stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+               stream_index = ATOMISP_INPUT_STREAM_GENERAL;
        } else {
                stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
                               ATOMISP_INPUT_STREAM_VIDEO :
-                              atomisp_source_pad_to_stream_id(asd, source_pad);
+                              ATOMISP_INPUT_STREAM_GENERAL;
        }
 
        if (0 != ia_css_pipe_get_info(asd->stream_env[stream_index]
@@ -3626,6 +3611,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
        struct atomisp_dis_buf *dis_buf;
        unsigned long flags;
 
+       lockdep_assert_held(&isp->mutex);
+
        if (!asd->params.dvs_stat->hor_prod.odd_real ||
            !asd->params.dvs_stat->hor_prod.odd_imag ||
            !asd->params.dvs_stat->hor_prod.even_real ||
@@ -3637,12 +3624,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
                return -EINVAL;
 
        /* isp needs to be streaming to get DIS statistics */
-       spin_lock_irqsave(&isp->lock, flags);
-       if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
-               spin_unlock_irqrestore(&isp->lock, flags);
+       if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
                return -EINVAL;
-       }
-       spin_unlock_irqrestore(&isp->lock, flags);
 
        if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
                /* If the grid info in the argument differs from the current
@@ -3763,32 +3746,6 @@ void atomisp_css_morph_table_free(struct ia_css_morph_table *table)
        ia_css_morph_table_free(table);
 }
 
-void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
-       unsigned int overlap)
-{
-       /* CSS 2.0 doesn't support this API. */
-       dev_dbg(isp->dev, "set cont prev start time is not supported.\n");
-       return;
-}
-
-/* Set the ACC binary arguments */
-int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw)
-{
-       unsigned int mem;
-
-       for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) {
-               if (acc_fw->args[mem].length == 0)
-                       continue;
-
-               ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers,
-                                                 IA_CSS_PARAM_CLASS_PARAM, mem,
-                                                 acc_fw->args[mem].css_ptr,
-                                                 acc_fw->args[mem].length);
-       }
-
-       return 0;
-}
-
 static struct atomisp_sub_device *__get_atomisp_subdev(
     struct ia_css_pipe *css_pipe,
     struct atomisp_device *isp,
@@ -3824,8 +3781,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
        enum atomisp_input_stream_id stream_id = 0;
        struct atomisp_css_event current_event;
        struct atomisp_sub_device *asd;
-       bool reset_wdt_timer[MAX_STREAM_NUM] = {false};
-       int i;
+
+       lockdep_assert_held(&isp->mutex);
 
        while (!ia_css_dequeue_psys_event(&current_event.event)) {
                if (current_event.event.type ==
@@ -3839,14 +3796,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
                                __func__,
                                current_event.event.fw_assert_module_id,
                                current_event.event.fw_assert_line_no);
-                       for (i = 0; i < isp->num_of_streams; i++)
-                               atomisp_wdt_stop(&isp->asd[i], 0);
-
-                       if (!IS_ISP2401)
-                               atomisp_wdt(&isp->asd[0].wdt);
-                       else
-                               queue_work(isp->wdt_work_queue, &isp->wdt_work);
 
+                       queue_work(system_long_wq, &isp->assert_recovery_work);
                        return -EINVAL;
                } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) {
                        dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n",
@@ -3875,20 +3826,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
                        frame_done_found[asd->index] = true;
                        atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
                                         current_event.pipe, true, stream_id);
-
-                       if (!IS_ISP2401)
-                               reset_wdt_timer[asd->index] = true; /* ISP running */
-
                        break;
                case IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE:
                        dev_dbg(isp->dev, "event: Second output frame done");
                        frame_done_found[asd->index] = true;
                        atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
                                         current_event.pipe, true, stream_id);
-
-                       if (!IS_ISP2401)
-                               reset_wdt_timer[asd->index] = true; /* ISP running */
-
                        break;
                case IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE:
                        dev_dbg(isp->dev, "event: 3A stats frame done");
@@ -3909,19 +3852,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
                        atomisp_buf_done(asd, 0,
                                         IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
                                         current_event.pipe, true, stream_id);
-
-                       if (!IS_ISP2401)
-                               reset_wdt_timer[asd->index] = true; /* ISP running */
-
                        break;
                case IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE:
                        dev_dbg(isp->dev, "event: second VF output frame done");
                        atomisp_buf_done(asd, 0,
                                         IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
                                         current_event.pipe, true, stream_id);
-                       if (!IS_ISP2401)
-                               reset_wdt_timer[asd->index] = true; /* ISP running */
-
                        break;
                case IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE:
                        dev_dbg(isp->dev, "event: dis stats frame done");
@@ -3944,24 +3880,6 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
                }
        }
 
-       if (IS_ISP2401)
-               return 0;
-
-       /* ISP2400: If there are no buffers queued then delete wdt timer. */
-       for (i = 0; i < isp->num_of_streams; i++) {
-               asd = &isp->asd[i];
-               if (!asd)
-                       continue;
-               if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-                       continue;
-               if (!atomisp_buffers_queued(asd))
-                       atomisp_wdt_stop(asd, false);
-               else if (reset_wdt_timer[i])
-                       /* SOF irq should not reset wdt timer. */
-                       atomisp_wdt_refresh(asd,
-                                           ATOMISP_WDT_KEEP_CURRENT_DELAY);
-       }
-
        return 0;
 }
 
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c
deleted file mode 100644
index 4570a9a..0000000
--- a/drivers/staging/media/atomisp/pci/atomisp_file.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#include <media/v4l2-event.h>
-#include <media/v4l2-mediabus.h>
-
-#include <media/videobuf-vmalloc.h>
-#include <linux/delay.h>
-
-#include "ia_css.h"
-
-#include "atomisp_cmd.h"
-#include "atomisp_common.h"
-#include "atomisp_file.h"
-#include "atomisp_internal.h"
-#include "atomisp_ioctl.h"
-
-static void file_work(struct work_struct *work)
-{
-       struct atomisp_file_device *file_dev =
-           container_of(work, struct atomisp_file_device, work);
-       struct atomisp_device *isp = file_dev->isp;
-       /* only support file injection on subdev0 */
-       struct atomisp_sub_device *asd = &isp->asd[0];
-       struct atomisp_video_pipe *out_pipe = &asd->video_in;
-       unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]);
-       struct v4l2_mbus_framefmt isp_sink_fmt;
-
-       if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-               return;
-
-       dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__);
-       isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL,
-                                               V4L2_SUBDEV_FORMAT_ACTIVE,
-                                               ATOMISP_SUBDEV_PAD_SINK);
-
-       while (!ia_css_isp_has_started())
-               usleep_range(1000, 1500);
-
-       ia_css_stream_send_input_frame(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
-                                      buf, isp_sink_fmt.width,
-                                      isp_sink_fmt.height);
-       dev_dbg(isp->dev, "<%s: streaming done\n", __func__);
-}
-
-static int file_input_s_stream(struct v4l2_subdev *sd, int enable)
-{
-       struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
-       struct atomisp_device *isp = file_dev->isp;
-       /* only support file injection on subdev0 */
-       struct atomisp_sub_device *asd = &isp->asd[0];
-
-       dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable);
-       if (enable) {
-               if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
-                       return 0;
-
-               queue_work(file_dev->work_queue, &file_dev->work);
-               return 0;
-       }
-       cancel_work_sync(&file_dev->work);
-       return 0;
-}
-
-static int file_input_get_fmt(struct v4l2_subdev *sd,
-                             struct v4l2_subdev_state *sd_state,
-                             struct v4l2_subdev_format *format)
-{
-       struct v4l2_mbus_framefmt *fmt = &format->format;
-       struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
-       struct atomisp_device *isp = file_dev->isp;
-       /* only support file injection on subdev0 */
-       struct atomisp_sub_device *asd = &isp->asd[0];
-       struct v4l2_mbus_framefmt *isp_sink_fmt;
-
-       if (format->pad)
-               return -EINVAL;
-       isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
-                                              V4L2_SUBDEV_FORMAT_ACTIVE,
-                                              ATOMISP_SUBDEV_PAD_SINK);
-
-       fmt->width = isp_sink_fmt->width;
-       fmt->height = isp_sink_fmt->height;
-       fmt->code = isp_sink_fmt->code;
-
-       return 0;
-}
-
-static int file_input_set_fmt(struct v4l2_subdev *sd,
-                             struct v4l2_subdev_state *sd_state,
-                             struct v4l2_subdev_format *format)
-{
-       struct v4l2_mbus_framefmt *fmt = &format->format;
-
-       if (format->pad)
-               return -EINVAL;
-       file_input_get_fmt(sd, sd_state, format);
-       if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-               sd_state->pads->try_fmt = *fmt;
-       return 0;
-}
-
-static int file_input_log_status(struct v4l2_subdev *sd)
-{
-       /*to fake*/
-       return 0;
-}
-
-static int file_input_s_power(struct v4l2_subdev *sd, int on)
-{
-       /* to fake */
-       return 0;
-}
-
-static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
-                                    struct v4l2_subdev_state *sd_state,
-                                    struct v4l2_subdev_mbus_code_enum *code)
-{
-       /*to fake*/
-       return 0;
-}
-
-static int file_input_enum_frame_size(struct v4l2_subdev *sd,
-                                     struct v4l2_subdev_state *sd_state,
-                                     struct v4l2_subdev_frame_size_enum *fse)
-{
-       /*to fake*/
-       return 0;
-}
-
-static int file_input_enum_frame_ival(struct v4l2_subdev *sd,
-                                     struct v4l2_subdev_state *sd_state,
-                                     struct v4l2_subdev_frame_interval_enum
-                                     *fie)
-{
-       /*to fake*/
-       return 0;
-}
-
-static const struct v4l2_subdev_video_ops file_input_video_ops = {
-       .s_stream = file_input_s_stream,
-};
-
-static const struct v4l2_subdev_core_ops file_input_core_ops = {
-       .log_status = file_input_log_status,
-       .s_power = file_input_s_power,
-};
-
-static const struct v4l2_subdev_pad_ops file_input_pad_ops = {
-       .enum_mbus_code = file_input_enum_mbus_code,
-       .enum_frame_size = file_input_enum_frame_size,
-       .enum_frame_interval = file_input_enum_frame_ival,
-       .get_fmt = file_input_get_fmt,
-       .set_fmt = file_input_set_fmt,
-};
-
-static const struct v4l2_subdev_ops file_input_ops = {
-       .core = &file_input_core_ops,
-       .video = &file_input_video_ops,
-       .pad = &file_input_pad_ops,
-};
-
-void
-atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev)
-{
-       media_entity_cleanup(&file_dev->sd.entity);
-       v4l2_device_unregister_subdev(&file_dev->sd);
-}
-
-int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
-       struct v4l2_device *vdev)
-{
-       /* Register the subdev and video nodes. */
-       return  v4l2_device_register_subdev(vdev, &file_dev->sd);
-}
-
-void atomisp_file_input_cleanup(struct atomisp_device *isp)
-{
-       struct atomisp_file_device *file_dev = &isp->file_dev;
-
-       if (file_dev->work_queue) {
-               destroy_workqueue(file_dev->work_queue);
-               file_dev->work_queue = NULL;
-       }
-}
-
-int atomisp_file_input_init(struct atomisp_device *isp)
-{
-       struct atomisp_file_device *file_dev = &isp->file_dev;
-       struct v4l2_subdev *sd = &file_dev->sd;
-       struct media_pad *pads = file_dev->pads;
-       struct media_entity *me = &sd->entity;
-
-       file_dev->isp = isp;
-       file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
-       if (!file_dev->work_queue) {
-               dev_err(isp->dev, "Failed to initialize file inject workq\n");
-               return -ENOMEM;
-       }
-
-       INIT_WORK(&file_dev->work, file_work);
-
-       v4l2_subdev_init(sd, &file_input_ops);
-       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-       strscpy(sd->name, "file_input_subdev", sizeof(sd->name));
-       v4l2_set_subdevdata(sd, file_dev);
-
-       pads[0].flags = MEDIA_PAD_FL_SINK;
-       me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
-
-       return media_entity_pads_init(me, 1, pads);
-}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp_file.h
deleted file mode 100644
index f166a2a..0000000
--- a/drivers/staging/media/atomisp/pci/atomisp_file.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Medifield PNW Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __ATOMISP_FILE_H__
-#define __ATOMISP_FILE_H__
-
-#include <media/media-entity.h>
-#include <media/v4l2-subdev.h>
-
-struct atomisp_device;
-
-struct atomisp_file_device {
-       struct v4l2_subdev sd;
-       struct atomisp_device *isp;
-       struct media_pad pads[1];
-
-       struct workqueue_struct *work_queue;
-       struct work_struct work;
-};
-
-void atomisp_file_input_cleanup(struct atomisp_device *isp);
-int atomisp_file_input_init(struct atomisp_device *isp);
-void atomisp_file_input_unregister_entities(
-    struct atomisp_file_device *file_dev);
-int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
-       struct v4l2_device *vdev);
-#endif /* __ATOMISP_FILE_H__ */
index 77150e4..84a84e0 100644
@@ -369,45 +369,6 @@ static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd,
                return IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
 }
 
-static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd)
-{
-       enum ia_css_buffer_type buf_type;
-       enum ia_css_pipe_id css_capture_pipe_id = IA_CSS_PIPE_ID_COPY;
-       enum ia_css_pipe_id css_preview_pipe_id = IA_CSS_PIPE_ID_COPY;
-       enum ia_css_pipe_id css_video_pipe_id = IA_CSS_PIPE_ID_COPY;
-       enum atomisp_input_stream_id input_stream_id;
-       struct atomisp_video_pipe *capture_pipe;
-       struct atomisp_video_pipe *preview_pipe;
-       struct atomisp_video_pipe *video_pipe;
-
-       capture_pipe = &asd->video_out_capture;
-       preview_pipe = &asd->video_out_preview;
-       video_pipe = &asd->video_out_video_capture;
-
-       buf_type = atomisp_get_css_buf_type(
-                      asd, css_preview_pipe_id,
-                      atomisp_subdev_source_pad(&preview_pipe->vdev));
-       input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
-       atomisp_q_video_buffers_to_css(asd, preview_pipe,
-                                      input_stream_id,
-                                      buf_type, css_preview_pipe_id);
-
-       buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id,
-                                           atomisp_subdev_source_pad(&capture_pipe->vdev));
-       input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
-       atomisp_q_video_buffers_to_css(asd, capture_pipe,
-                                      input_stream_id,
-                                      buf_type, css_capture_pipe_id);
-
-       buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id,
-                                           atomisp_subdev_source_pad(&video_pipe->vdev));
-       input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
-       atomisp_q_video_buffers_to_css(asd, video_pipe,
-                                      input_stream_id,
-                                      buf_type, css_video_pipe_id);
-       return 0;
-}
-
 /* queue all available buffers to css */
 int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
 {
@@ -423,11 +384,6 @@ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
        bool raw_mode = atomisp_is_mbuscode_raw(
                            asd->fmt[asd->capture_pad].fmt.code);
 
-       if (asd->isp->inputs[asd->input_curr].camera_caps->
-           sensor[asd->sensor_curr].stream_num == 2 &&
-           !asd->yuvpp_mode)
-               return atomisp_qbuffers_to_css_for_all_pipes(asd);
-
        if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
                video_pipe = &asd->video_out_video_capture;
                css_video_pipe_id = IA_CSS_PIPE_ID_VIDEO;
@@ -593,47 +549,6 @@ static void atomisp_buf_release(struct videobuf_queue *vq,
        atomisp_videobuf_free_buf(vb);
 }
 
-static int atomisp_buf_setup_output(struct videobuf_queue *vq,
-                                   unsigned int *count, unsigned int *size)
-{
-       struct atomisp_video_pipe *pipe = vq->priv_data;
-
-       *size = pipe->pix.sizeimage;
-
-       return 0;
-}
-
-static int atomisp_buf_prepare_output(struct videobuf_queue *vq,
-                                     struct videobuf_buffer *vb,
-                                     enum v4l2_field field)
-{
-       struct atomisp_video_pipe *pipe = vq->priv_data;
-
-       vb->size = pipe->pix.sizeimage;
-       vb->width = pipe->pix.width;
-       vb->height = pipe->pix.height;
-       vb->field = field;
-       vb->state = VIDEOBUF_PREPARED;
-
-       return 0;
-}
-
-static void atomisp_buf_queue_output(struct videobuf_queue *vq,
-                                    struct videobuf_buffer *vb)
-{
-       struct atomisp_video_pipe *pipe = vq->priv_data;
-
-       list_add_tail(&vb->queue, &pipe->activeq_out);
-       vb->state = VIDEOBUF_QUEUED;
-}
-
-static void atomisp_buf_release_output(struct videobuf_queue *vq,
-                                      struct videobuf_buffer *vb)
-{
-       videobuf_vmalloc_free(vb);
-       vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
 static const struct videobuf_queue_ops videobuf_qops = {
        .buf_setup      = atomisp_buf_setup,
        .buf_prepare    = atomisp_buf_prepare,
@@ -641,13 +556,6 @@ static const struct videobuf_queue_ops videobuf_qops = {
        .buf_release    = atomisp_buf_release,
 };
 
-static const struct videobuf_queue_ops videobuf_qops_output = {
-       .buf_setup      = atomisp_buf_setup_output,
-       .buf_prepare    = atomisp_buf_prepare_output,
-       .buf_queue      = atomisp_buf_queue_output,
-       .buf_release    = atomisp_buf_release_output,
-};
-
 static int atomisp_init_pipe(struct atomisp_video_pipe *pipe)
 {
        /* init locks */
@@ -660,15 +568,7 @@ static int atomisp_init_pipe(struct atomisp_video_pipe *pipe)
                                    sizeof(struct atomisp_buffer), pipe,
                                    NULL);      /* ext_lock: NULL */
 
-       videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL,
-                                   &pipe->irq_lock,
-                                   V4L2_BUF_TYPE_VIDEO_OUTPUT,
-                                   V4L2_FIELD_NONE,
-                                   sizeof(struct atomisp_buffer), pipe,
-                                   NULL);      /* ext_lock: NULL */
-
        INIT_LIST_HEAD(&pipe->activeq);
-       INIT_LIST_HEAD(&pipe->activeq_out);
        INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
        INIT_LIST_HEAD(&pipe->per_frame_params);
        memset(pipe->frame_request_config_id, 0,
@@ -684,7 +584,6 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp)
 {
        unsigned int i;
 
-       isp->sw_contex.file_input = false;
        isp->need_gfx_throttle = true;
        isp->isp_fatal_error = false;
        isp->mipi_frame_size = 0;
@@ -741,9 +640,7 @@ static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
        return asd->video_out_preview.users +
               asd->video_out_vf.users +
               asd->video_out_capture.users +
-              asd->video_out_video_capture.users +
-              asd->video_acc.users +
-              asd->video_in.users;
+              asd->video_out_video_capture.users;
 }
 
 unsigned int atomisp_dev_users(struct atomisp_device *isp)
@@ -760,48 +657,18 @@ static int atomisp_open(struct file *file)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_video_pipe *pipe = NULL;
-       struct atomisp_acc_pipe *acc_pipe = NULL;
-       struct atomisp_sub_device *asd;
-       bool acc_node = false;
+       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+       struct atomisp_sub_device *asd = pipe->asd;
        int ret;
 
        dev_dbg(isp->dev, "open device %s\n", vdev->name);
 
-       /*
-        * Ensure that if we are still loading we block. Once the loading
-        * is over we can proceed. We can't blindly hold the lock until
-        * that occurs as if the load fails we'll deadlock the unload
-        */
-       rt_mutex_lock(&isp->loading);
-       /*
-        * FIXME: revisit this with a better check once the code structure
-        * is cleaned up a bit more
-        */
        ret = v4l2_fh_open(file);
-       if (ret) {
-               dev_err(isp->dev,
-                       "%s: v4l2_fh_open() returned error %d\n",
-                      __func__, ret);
-               rt_mutex_unlock(&isp->loading);
+       if (ret)
                return ret;
-       }
-       if (!isp->ready) {
-               rt_mutex_unlock(&isp->loading);
-               return -ENXIO;
-       }
-       rt_mutex_unlock(&isp->loading);
 
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
 
-       acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
-       if (acc_node) {
-               acc_pipe = atomisp_to_acc_pipe(vdev);
-               asd = acc_pipe->asd;
-       } else {
-               pipe = atomisp_to_video_pipe(vdev);
-               asd = pipe->asd;
-       }
        asd->subdev.devnode = vdev;
        /* Deferred firmware loading case. */
        if (isp->css_env.isp_css_fw.bytes == 0) {
@@ -823,14 +690,6 @@ static int atomisp_open(struct file *file)
                isp->css_env.isp_css_fw.data = NULL;
        }
 
-       if (acc_node && acc_pipe->users) {
-               dev_dbg(isp->dev, "acc node already opened\n");
-               rt_mutex_unlock(&isp->mutex);
-               return -EBUSY;
-       } else if (acc_node) {
-               goto dev_init;
-       }
-
        if (!isp->input_cnt) {
                dev_err(isp->dev, "no camera attached\n");
                ret = -EINVAL;
@@ -842,7 +701,7 @@ static int atomisp_open(struct file *file)
         */
        if (pipe->users) {
                dev_dbg(isp->dev, "video node already opened\n");
-               rt_mutex_unlock(&isp->mutex);
+               mutex_unlock(&isp->mutex);
                return -EBUSY;
        }
 
@@ -850,7 +709,6 @@ static int atomisp_open(struct file *file)
        if (ret)
                goto error;
 
-dev_init:
        if (atomisp_dev_users(isp)) {
                dev_dbg(isp->dev, "skip init isp in open\n");
                goto init_subdev;
@@ -885,16 +743,11 @@ init_subdev:
        atomisp_subdev_init_struct(asd);
 
 done:
-
-       if (acc_node)
-               acc_pipe->users++;
-       else
-               pipe->users++;
-       rt_mutex_unlock(&isp->mutex);
+       pipe->users++;
+       mutex_unlock(&isp->mutex);
 
        /* Ensure that a mode is set */
-       if (!acc_node)
-               v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+       v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
 
        return 0;
 
@@ -902,7 +755,8 @@ css_error:
        atomisp_css_uninit(isp);
        pm_runtime_put(vdev->v4l2_dev->dev);
 error:
-       rt_mutex_unlock(&isp->mutex);
+       mutex_unlock(&isp->mutex);
+       v4l2_fh_release(file);
        return ret;
 }
 
@@ -910,13 +764,12 @@ static int atomisp_release(struct file *file)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_video_pipe *pipe;
-       struct atomisp_acc_pipe *acc_pipe;
-       struct atomisp_sub_device *asd;
-       bool acc_node;
+       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+       struct atomisp_sub_device *asd = pipe->asd;
        struct v4l2_requestbuffers req;
        struct v4l2_subdev_fh fh;
        struct v4l2_rect clear_compose = {0};
+       unsigned long flags;
        int ret = 0;
 
        v4l2_fh_init(&fh.vfh, vdev);
@@ -925,23 +778,12 @@ static int atomisp_release(struct file *file)
        if (!isp)
                return -EBADF;
 
-       mutex_lock(&isp->streamoff_mutex);
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
 
        dev_dbg(isp->dev, "release device %s\n", vdev->name);
-       acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
-       if (acc_node) {
-               acc_pipe = atomisp_to_acc_pipe(vdev);
-               asd = acc_pipe->asd;
-       } else {
-               pipe = atomisp_to_video_pipe(vdev);
-               asd = pipe->asd;
-       }
+
        asd->subdev.devnode = vdev;
-       if (acc_node) {
-               acc_pipe->users--;
-               goto subdev_uninit;
-       }
+
        pipe->users--;
 
        if (pipe->capq.streaming)
@@ -950,27 +792,19 @@ static int atomisp_release(struct file *file)
                         __func__);
 
        if (pipe->capq.streaming &&
-           __atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
-               dev_err(isp->dev,
-                       "atomisp_streamoff failed on release, driver bug");
+           atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+               dev_err(isp->dev, "atomisp_streamoff failed on release, driver bug");
                goto done;
        }
 
        if (pipe->users)
                goto done;
 
-       if (__atomisp_reqbufs(file, NULL, &req)) {
-               dev_err(isp->dev,
-                       "atomisp_reqbufs failed on release, driver bug");
+       if (atomisp_reqbufs(file, NULL, &req)) {
+               dev_err(isp->dev, "atomisp_reqbufs failed on release, driver bug");
                goto done;
        }
 
-       if (pipe->outq.bufs[0]) {
-               mutex_lock(&pipe->outq.vb_lock);
-               videobuf_queue_cancel(&pipe->outq);
-               mutex_unlock(&pipe->outq.vb_lock);
-       }
-
        /*
         * A little trick here:
         * file injection input resolution is recorded in the sink pad,
@@ -978,26 +812,17 @@ static int atomisp_release(struct file *file)
         * The sink pad setting can only be cleared when all device nodes
         * get released.
         */
-       if (!isp->sw_contex.file_input && asd->fmt_auto->val) {
+       if (asd->fmt_auto->val) {
                struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
 
                atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
                                        V4L2_SUBDEV_FORMAT_ACTIVE,
                                        ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
        }
-subdev_uninit:
+
        if (atomisp_subdev_users(asd))
                goto done;
 
-       /* clear the sink pad for file input */
-       if (isp->sw_contex.file_input && asd->fmt_auto->val) {
-               struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
-
-               atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
-                                       V4L2_SUBDEV_FORMAT_ACTIVE,
-                                       ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
-       }
-
        atomisp_css_free_stat_buffers(asd);
        atomisp_free_internal_buffers(asd);
        ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
@@ -1007,7 +832,9 @@ subdev_uninit:
 
        /* clear the asd field to show this camera is not used */
        isp->inputs[asd->input_curr].asd = NULL;
+       spin_lock_irqsave(&isp->lock, flags);
        asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+       spin_unlock_irqrestore(&isp->lock, flags);
 
        if (atomisp_dev_users(isp))
                goto done;
@@ -1029,15 +856,12 @@ subdev_uninit:
                dev_err(isp->dev, "Failed to power off device\n");
 
 done:
-       if (!acc_node) {
-               atomisp_subdev_set_selection(&asd->subdev, fh.state,
-                                            V4L2_SUBDEV_FORMAT_ACTIVE,
-                                            atomisp_subdev_source_pad(vdev),
-                                            V4L2_SEL_TGT_COMPOSE, 0,
-                                            &clear_compose);
-       }
-       rt_mutex_unlock(&isp->mutex);
-       mutex_unlock(&isp->streamoff_mutex);
+       atomisp_subdev_set_selection(&asd->subdev, fh.state,
+                                    V4L2_SUBDEV_FORMAT_ACTIVE,
+                                    atomisp_subdev_source_pad(vdev),
+                                    V4L2_SEL_TGT_COMPOSE, 0,
+                                    &clear_compose);
+       mutex_unlock(&isp->mutex);
 
        return v4l2_fh_release(file);
 }
@@ -1194,7 +1018,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & (VM_WRITE | VM_READ)))
                return -EACCES;
 
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
 
        if (!(vma->vm_flags & VM_SHARED)) {
                /* Map private buffer.
@@ -1205,7 +1029,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
                 */
                vma->vm_flags |= VM_SHARED;
                ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT);
-               rt_mutex_unlock(&isp->mutex);
+               mutex_unlock(&isp->mutex);
                return ret;
        }
 
@@ -1248,7 +1072,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
                }
                raw_virt_addr->data_bytes = origin_size;
                vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-               rt_mutex_unlock(&isp->mutex);
+               mutex_unlock(&isp->mutex);
                return 0;
        }
 
@@ -1260,24 +1084,16 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
                ret = -EINVAL;
                goto error;
        }
-       rt_mutex_unlock(&isp->mutex);
+       mutex_unlock(&isp->mutex);
 
        return atomisp_videobuf_mmap_mapper(&pipe->capq, vma);
 
 error:
-       rt_mutex_unlock(&isp->mutex);
+       mutex_unlock(&isp->mutex);
 
        return ret;
 }
 
-static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-
-       return videobuf_mmap_mapper(&pipe->outq, vma);
-}
-
 static __poll_t atomisp_poll(struct file *file,
                             struct poll_table_struct *pt)
 {
@@ -1285,12 +1101,12 @@ static __poll_t atomisp_poll(struct file *file,
        struct atomisp_device *isp = video_get_drvdata(vdev);
        struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
 
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
        if (pipe->capq.streaming != 1) {
-               rt_mutex_unlock(&isp->mutex);
+               mutex_unlock(&isp->mutex);
                return EPOLLERR;
        }
-       rt_mutex_unlock(&isp->mutex);
+       mutex_unlock(&isp->mutex);
 
        return videobuf_poll_stream(file, &pipe->capq, pt);
 }
@@ -1310,15 +1126,3 @@ const struct v4l2_file_operations atomisp_fops = {
 #endif
        .poll = atomisp_poll,
 };
-
-const struct v4l2_file_operations atomisp_file_fops = {
-       .owner = THIS_MODULE,
-       .open = atomisp_open,
-       .release = atomisp_release,
-       .mmap = atomisp_file_mmap,
-       .unlocked_ioctl = video_ioctl2,
-#ifdef CONFIG_COMPAT
-       /* .compat_ioctl32 = atomisp_compat_ioctl32, */
-#endif
-       .poll = atomisp_poll,
-};
index bf527b3..3d41fab 100644
@@ -134,24 +134,6 @@ static DEFINE_MUTEX(vcm_lock);
 
 static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev);
 
-/*
- * Legacy/stub behavior copied from upstream platform_camera.c.  The
- * atomisp driver relies on these values being non-NULL in a few
- * places, even though they are hard-coded in all current
- * implementations.
- */
-const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void)
-{
-       static const struct atomisp_camera_caps caps = {
-               .sensor_num = 1,
-               .sensor = {
-                       { .stream_num = 1, },
-               },
-       };
-       return &caps;
-}
-EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps);
-
 const struct atomisp_platform_data *atomisp_get_platform_data(void)
 {
        return &pdata;
@@ -1066,6 +1048,38 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
        return ret;
 }
 
+static int camera_sensor_csi_alloc(struct v4l2_subdev *sd, u32 port, u32 lanes,
+                                  u32 format, u32 bayer_order)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct camera_mipi_info *csi;
+
+       csi = kzalloc(sizeof(*csi), GFP_KERNEL);
+       if (!csi)
+               return -ENOMEM;
+
+       csi->port = port;
+       csi->num_lanes = lanes;
+       csi->input_format = format;
+       csi->raw_bayer_order = bayer_order;
+       v4l2_set_subdev_hostdata(sd, csi);
+       csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+       csi->metadata_effective_width = NULL;
+       dev_info(&client->dev,
+                "camera pdata: port: %d lanes: %d order: %8.8x\n",
+                port, lanes, bayer_order);
+
+       return 0;
+}
+
+static void camera_sensor_csi_free(struct v4l2_subdev *sd)
+{
+       struct camera_mipi_info *csi;
+
+       csi = v4l2_get_subdev_hostdata(sd);
+       kfree(csi);
+}
+
 static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1074,8 +1088,11 @@ static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
        if (!client || !gs)
                return -ENODEV;
 
-       return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes,
-                                gs->csi_fmt, gs->csi_bayer, flag);
+       if (flag)
+               return camera_sensor_csi_alloc(sd, gs->csi_port, gs->csi_lanes,
+                                              gs->csi_fmt, gs->csi_bayer);
+       camera_sensor_csi_free(sd);
+       return 0;
 }
 
 static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
@@ -1207,16 +1224,14 @@ static int gmin_get_config_dsm_var(struct device *dev,
        if (!strcmp(var, "CamClk"))
                return -EINVAL;
 
-       obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL);
+       /* Return on unexpected object type */
+       obj = acpi_evaluate_dsm_typed(handle, &atomisp_dsm_guid, 0, 0, NULL,
+                                     ACPI_TYPE_PACKAGE);
        if (!obj) {
                dev_info_once(dev, "Didn't find ACPI _DSM table.\n");
                return -EINVAL;
        }
 
-       /* Return on unexpected object type */
-       if (obj->type != ACPI_TYPE_PACKAGE)
-               return -EINVAL;
-
 #if 0 /* Just for debugging purposes */
        for (i = 0; i < obj->package.count; i++) {
                union acpi_object *cur = &obj->package.elements[i];
@@ -1360,35 +1375,6 @@ int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
 }
 EXPORT_SYMBOL_GPL(gmin_get_var_int);
 
-int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
-                     u32 lanes, u32 format, u32 bayer_order, int flag)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct camera_mipi_info *csi = NULL;
-
-       if (flag) {
-               csi = kzalloc(sizeof(*csi), GFP_KERNEL);
-               if (!csi)
-                       return -ENOMEM;
-               csi->port = port;
-               csi->num_lanes = lanes;
-               csi->input_format = format;
-               csi->raw_bayer_order = bayer_order;
-               v4l2_set_subdev_hostdata(sd, (void *)csi);
-               csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
-               csi->metadata_effective_width = NULL;
-               dev_info(&client->dev,
-                        "camera pdata: port: %d lanes: %d order: %8.8x\n",
-                        port, lanes, bayer_order);
-       } else {
-               csi = v4l2_get_subdev_hostdata(sd);
-               kfree(csi);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(camera_sensor_csi);
-
 /* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't
  * work.  Disable so the kernel framework doesn't hang the device
  * trying.  The driver itself does direct calls to the PUNIT to manage
index f71ab1e..d9d158c 100644
@@ -34,7 +34,6 @@
 #include "sh_css_legacy.h"
 
 #include "atomisp_csi2.h"
-#include "atomisp_file.h"
 #include "atomisp_subdev.h"
 #include "atomisp_tpg.h"
 #include "atomisp_compat.h"
 #define ATOM_ISP_POWER_DOWN    0
 #define ATOM_ISP_POWER_UP      1
 
-#define ATOM_ISP_MAX_INPUTS    4
+#define ATOM_ISP_MAX_INPUTS    3
 
 #define ATOMISP_SC_TYPE_SIZE   2
 
 #define ATOMISP_ISP_TIMEOUT_DURATION           (2 * HZ)
 #define ATOMISP_EXT_ISP_TIMEOUT_DURATION        (6 * HZ)
-#define ATOMISP_ISP_FILE_TIMEOUT_DURATION      (60 * HZ)
 #define ATOMISP_WDT_KEEP_CURRENT_DELAY          0
 #define ATOMISP_ISP_MAX_TIMEOUT_COUNT  2
 #define ATOMISP_CSS_STOP_TIMEOUT_US    200000
 #define ATOMISP_DELAYED_INIT_QUEUED    1
 #define ATOMISP_DELAYED_INIT_DONE      2
 
-#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \
-       ((lines) * 38 / 100 & 0xfffffe)
-
 /*
  * Define how fast CPU should be able to serve ISP interrupts.
  * The bigger the value, the higher risk that the ISP is not
  * Moorefield/Baytrail platform.
  */
 #define ATOMISP_SOC_CAMERA(asd)  \
-       (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \
-       && asd->isp->inputs[asd->input_curr].camera_caps-> \
-          sensor[asd->sensor_curr].stream_num == 1)
+       (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA)
 
 #define ATOMISP_USE_YUVPP(asd)  \
        (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \
@@ -167,7 +160,6 @@ struct atomisp_input_subdev {
         */
        struct atomisp_sub_device *asd;
 
-       const struct atomisp_camera_caps *camera_caps;
        int sensor_index;
 };
 
@@ -203,7 +195,6 @@ struct atomisp_regs {
 };
 
 struct atomisp_sw_contex {
-       bool file_input;
        int power_state;
        int running_freq;
 };
@@ -241,24 +232,10 @@ struct atomisp_device {
 
        struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS];
        struct atomisp_tpg_device tpg;
-       struct atomisp_file_device file_dev;
 
        /* Purpose of mutex is to protect and serialize use of isp data
         * structures and css API calls. */
-       struct rt_mutex mutex;
-       /*
-        * This mutex ensures that we don't allow an open to succeed while
-        * the initialization process is incomplete
-        */
-       struct rt_mutex loading;
-       /* Set once the ISP is ready to allow opens */
-       bool ready;
-       /*
-        * Serialise streamoff: mutex is dropped during streamoff to
-        * cancel the watchdog queue. MUST be acquired BEFORE
-        * "mutex".
-        */
-       struct mutex streamoff_mutex;
+       struct mutex mutex;
 
        unsigned int input_cnt;
        struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
@@ -272,15 +249,9 @@ struct atomisp_device {
        /* isp timeout status flag */
        bool isp_timeout;
        bool isp_fatal_error;
-       struct workqueue_struct *wdt_work_queue;
-       struct work_struct wdt_work;
-
-       /* ISP2400 */
-       atomic_t wdt_count;
-
-       atomic_t wdt_work_queued;
+       struct work_struct assert_recovery_work;
 
-       spinlock_t lock; /* Just for streaming below */
+       spinlock_t lock; /* Protects asd[i].streaming */
 
        bool need_gfx_throttle;
 
@@ -296,20 +267,4 @@ struct atomisp_device {
 
 extern struct device *atomisp_dev;
 
-#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt)
-
-/* ISP2401 */
-void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
-                             unsigned int delay);
-void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay);
-
-/* ISP2400 */
-void atomisp_wdt_start(struct atomisp_sub_device *asd);
-
-/* ISP2401 */
-void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe);
-void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync);
-
-void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync);
-
 #endif /* __ATOMISP_INTERNAL_H__ */
index 459645c..0ddb0ed 100644
@@ -535,6 +535,32 @@ atomisp_get_format_bridge_from_mbus(u32 mbus_code)
        return NULL;
 }
 
+int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change)
+{
+       lockdep_assert_held(&pipe->isp->mutex);
+
+       if (pipe->isp->isp_fatal_error)
+               return -EIO;
+
+       switch (pipe->asd->streaming) {
+       case ATOMISP_DEVICE_STREAMING_DISABLED:
+               break;
+       case ATOMISP_DEVICE_STREAMING_ENABLED:
+               if (settings_change) {
+                       dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n");
+                       return -EBUSY;
+               }
+               break;
+       case ATOMISP_DEVICE_STREAMING_STOPPING:
+               dev_err(pipe->isp->dev, "IOCTL issued while stopping\n");
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /*
  * v4l2 ioctls
  * return ISP capabilities
@@ -609,8 +635,7 @@ atomisp_subdev_streaming_count(struct atomisp_sub_device *asd)
        return asd->video_out_preview.capq.streaming
               + asd->video_out_capture.capq.streaming
               + asd->video_out_video_capture.capq.streaming
-              + asd->video_out_vf.capq.streaming
-              + asd->video_in.capq.streaming;
+              + asd->video_out_vf.capq.streaming;
 }
 
 unsigned int atomisp_streaming_count(struct atomisp_device *isp)
@@ -630,19 +655,9 @@ unsigned int atomisp_streaming_count(struct atomisp_device *isp)
 static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
 {
        struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
        struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
-       rt_mutex_lock(&isp->mutex);
        *input = asd->input_curr;
-       rt_mutex_unlock(&isp->mutex);
-
        return 0;
 }
 
@@ -653,22 +668,19 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+       struct atomisp_sub_device *asd = pipe->asd;
        struct v4l2_subdev *camera = NULL;
        struct v4l2_subdev *motor;
        int ret;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
+       ret = atomisp_pipe_check(pipe, true);
+       if (ret)
+               return ret;
 
-       rt_mutex_lock(&isp->mutex);
        if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) {
                dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
-               ret = -EINVAL;
-               goto error;
+               return -EINVAL;
        }
 
        /*
@@ -680,22 +692,13 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
                dev_err(isp->dev,
                        "%s, camera is already used by stream: %d\n", __func__,
                        isp->inputs[input].asd->index);
-               ret = -EBUSY;
-               goto error;
+               return -EBUSY;
        }
 
        camera = isp->inputs[input].camera;
        if (!camera) {
                dev_err(isp->dev, "%s, no camera\n", __func__);
-               ret = -EINVAL;
-               goto error;
-       }
-
-       if (atomisp_subdev_streaming_count(asd)) {
-               dev_err(isp->dev,
-                       "ISP is still streaming, stop first\n");
-               ret = -EINVAL;
-               goto error;
+               return -EINVAL;
        }
 
        /* power off the current owned sensor, as it is not used this time */
@@ -714,7 +717,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
        ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1);
        if (ret) {
                dev_err(isp->dev, "Failed to power-on sensor\n");
-               goto error;
+               return ret;
        }
        /*
         * Some sensor driver resets the run mode during power-on, thus force
@@ -727,7 +730,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
                               0, isp->inputs[input].sensor_index, 0);
        if (ret && (ret != -ENOIOCTLCMD)) {
                dev_err(isp->dev, "Failed to select sensor\n");
-               goto error;
+               return ret;
        }
 
        if (!IS_ISP2401) {
@@ -738,20 +741,14 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
                        ret = v4l2_subdev_call(motor, core, s_power, 1);
        }
 
-       if (!isp->sw_contex.file_input && motor)
+       if (motor)
                ret = v4l2_subdev_call(motor, core, init, 1);
 
        asd->input_curr = input;
        /* mark this camera is used by the current stream */
        isp->inputs[input].asd = asd;
-       rt_mutex_unlock(&isp->mutex);
 
        return 0;
-
-error:
-       rt_mutex_unlock(&isp->mutex);
-
-       return ret;
 }
 
 static int atomisp_enum_framesizes(struct file *file, void *priv,
@@ -819,12 +816,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
        unsigned int i, fi = 0;
        int rval;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        camera = isp->inputs[asd->input_curr].camera;
        if(!camera) {
                dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
@@ -832,15 +823,12 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
                return -EINVAL;
        }
 
-       rt_mutex_lock(&isp->mutex);
-
        rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
        if (rval == -ENOIOCTLCMD) {
                dev_warn(isp->dev,
                         "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
                         camera->name);
        }
-       rt_mutex_unlock(&isp->mutex);
 
        if (rval)
                return rval;
@@ -872,20 +860,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
        return -EINVAL;
 }
 
-static int atomisp_g_fmt_file(struct file *file, void *fh,
-                             struct v4l2_format *f)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-
-       rt_mutex_lock(&isp->mutex);
-       f->fmt.pix = pipe->pix;
-       rt_mutex_unlock(&isp->mutex);
-
-       return 0;
-}
-
 static int atomisp_adjust_fmt(struct v4l2_format *f)
 {
        const struct atomisp_format_bridge *format_bridge;
@@ -957,13 +931,16 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh,
                               struct v4l2_format *f)
 {
        struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
        int ret;
 
-       rt_mutex_lock(&isp->mutex);
-       ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL);
-       rt_mutex_unlock(&isp->mutex);
+       /*
+        * atomisp_try_fmt() gives results with padding included; note that
+        * the padding gets removed again by the atomisp_adjust_fmt() call below.
+        */
+       f->fmt.pix.width += pad_w;
+       f->fmt.pix.height += pad_h;
 
+       ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL);
        if (ret)
                return ret;
 
@@ -974,12 +951,9 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh,
                             struct v4l2_format *f)
 {
        struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
        struct atomisp_video_pipe *pipe;
 
-       rt_mutex_lock(&isp->mutex);
        pipe = atomisp_to_video_pipe(vdev);
-       rt_mutex_unlock(&isp->mutex);
 
        f->fmt.pix = pipe->pix;
 
@@ -994,37 +968,6 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh,
        return atomisp_try_fmt_cap(file, fh, f);
 }
 
-static int atomisp_s_fmt_cap(struct file *file, void *fh,
-                            struct v4l2_format *f)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       int ret;
-
-       rt_mutex_lock(&isp->mutex);
-       if (isp->isp_fatal_error) {
-               ret = -EIO;
-               rt_mutex_unlock(&isp->mutex);
-               return ret;
-       }
-       ret = atomisp_set_fmt(vdev, f);
-       rt_mutex_unlock(&isp->mutex);
-       return ret;
-}
-
-static int atomisp_s_fmt_file(struct file *file, void *fh,
-                             struct v4l2_format *f)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       int ret;
-
-       rt_mutex_lock(&isp->mutex);
-       ret = atomisp_set_fmt_file(vdev, f);
-       rt_mutex_unlock(&isp->mutex);
-       return ret;
-}
-
 /*
  * Free videobuffer buffer priv data
  */
@@ -1160,8 +1103,7 @@ error:
 /*
  * Initiate Memory Mapping or User Pointer I/O
  */
-int __atomisp_reqbufs(struct file *file, void *fh,
-                     struct v4l2_requestbuffers *req)
+int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
@@ -1170,16 +1112,8 @@ int __atomisp_reqbufs(struct file *file, void *fh,
        struct ia_css_frame *frame;
        struct videobuf_vmalloc_memory *vm_mem;
        u16 source_pad = atomisp_subdev_source_pad(vdev);
-       u16 stream_id;
        int ret = 0, i = 0;
 
-       if (!asd) {
-               dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-       stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
-
        if (req->count == 0) {
                mutex_lock(&pipe->capq.vb_lock);
                if (!list_empty(&pipe->capq.stream))
@@ -1200,7 +1134,7 @@ int __atomisp_reqbufs(struct file *file, void *fh,
        if (ret)
                return ret;
 
-       atomisp_alloc_css_stat_bufs(asd, stream_id);
+       atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL);
 
        /*
         * for user pointer type, buffers are not really allocated here,
@@ -1238,36 +1172,6 @@ error:
        return -ENOMEM;
 }
 
-int atomisp_reqbufs(struct file *file, void *fh,
-                   struct v4l2_requestbuffers *req)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       int ret;
-
-       rt_mutex_lock(&isp->mutex);
-       ret = __atomisp_reqbufs(file, fh, req);
-       rt_mutex_unlock(&isp->mutex);
-
-       return ret;
-}
-
-static int atomisp_reqbufs_file(struct file *file, void *fh,
-                               struct v4l2_requestbuffers *req)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-
-       if (req->count == 0) {
-               mutex_lock(&pipe->outq.vb_lock);
-               atomisp_videobuf_free_queue(&pipe->outq);
-               mutex_unlock(&pipe->outq.vb_lock);
-               return 0;
-       }
-
-       return videobuf_reqbufs(&pipe->outq, req);
-}
-
 /* application query the status of a buffer */
 static int atomisp_querybuf(struct file *file, void *fh,
                            struct v4l2_buffer *buf)
@@ -1278,15 +1182,6 @@ static int atomisp_querybuf(struct file *file, void *fh,
        return videobuf_querybuf(&pipe->capq, buf);
 }
 
-static int atomisp_querybuf_file(struct file *file, void *fh,
-                                struct v4l2_buffer *buf)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-
-       return videobuf_querybuf(&pipe->outq, buf);
-}
-
 /*
  * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or
  * filled (output) buffer in the drivers incoming queue.
@@ -1305,32 +1200,16 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
        struct ia_css_frame *handle = NULL;
        u32 length;
        u32 pgnr;
-       int ret = 0;
-
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
-       rt_mutex_lock(&isp->mutex);
-       if (isp->isp_fatal_error) {
-               ret = -EIO;
-               goto error;
-       }
+       int ret;
 
-       if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
-               dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
-                       __func__);
-               ret = -EIO;
-               goto error;
-       }
+       ret = atomisp_pipe_check(pipe, false);
+       if (ret)
+               return ret;
 
        if (!buf || buf->index >= VIDEO_MAX_FRAME ||
            !pipe->capq.bufs[buf->index]) {
                dev_err(isp->dev, "Invalid index for qbuf.\n");
-               ret = -EINVAL;
-               goto error;
+               return -EINVAL;
        }
 
        /*
@@ -1338,12 +1217,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
         * address and reprograme out page table properly
         */
        if (buf->memory == V4L2_MEMORY_USERPTR) {
+               if (offset_in_page(buf->m.userptr)) {
+                       dev_err(isp->dev, "Error userptr is not page aligned.\n");
+                       return -EINVAL;
+               }
+
                vb = pipe->capq.bufs[buf->index];
                vm_mem = vb->priv;
-               if (!vm_mem) {
-                       ret = -EINVAL;
-                       goto error;
-               }
+               if (!vm_mem)
+                       return -EINVAL;
 
                length = vb->bsize;
                pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
@@ -1352,17 +1234,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
                        goto done;
 
                if (atomisp_get_css_frame_info(asd,
-                                              atomisp_subdev_source_pad(vdev), &frame_info)) {
-                       ret = -EIO;
-                       goto error;
-               }
+                                              atomisp_subdev_source_pad(vdev), &frame_info))
+                       return -EIO;
 
                ret = ia_css_frame_map(&handle, &frame_info,
                                            (void __user *)buf->m.userptr,
                                            pgnr);
                if (ret) {
                        dev_err(isp->dev, "Failed to map user buffer\n");
-                       goto error;
+                       return ret;
                }
 
                if (vm_mem->vaddr) {
@@ -1406,12 +1286,11 @@ done:
 
        pipe->frame_params[buf->index] = NULL;
 
-       rt_mutex_unlock(&isp->mutex);
-
+       mutex_unlock(&isp->mutex);
        ret = videobuf_qbuf(&pipe->capq, buf);
-       rt_mutex_lock(&isp->mutex);
+       mutex_lock(&isp->mutex);
        if (ret)
-               goto error;
+               return ret;
 
        /* TODO: do this better, not best way to queue to css */
        if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
@@ -1419,15 +1298,6 @@ done:
                        atomisp_handle_parameter_and_buffer(pipe);
                } else {
                        atomisp_qbuffers_to_css(asd);
-
-                       if (!IS_ISP2401) {
-                               if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
-                                       atomisp_wdt_start(asd);
-                       } else {
-                               if (!atomisp_is_wdt_running(pipe) &&
-                                   atomisp_buffers_queued_pipe(pipe))
-                                       atomisp_wdt_start_pipe(pipe);
-                       }
                }
        }
 
@@ -1449,58 +1319,11 @@ done:
                        asd->pending_capture_request++;
                        dev_dbg(isp->dev, "Add one pending capture request.\n");
        }
-       rt_mutex_unlock(&isp->mutex);
 
        dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index,
                vdev->name, asd->index);
 
-       return ret;
-
-error:
-       rt_mutex_unlock(&isp->mutex);
-       return ret;
-}
-
-static int atomisp_qbuf_file(struct file *file, void *fh,
-                            struct v4l2_buffer *buf)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-       int ret;
-
-       rt_mutex_lock(&isp->mutex);
-       if (isp->isp_fatal_error) {
-               ret = -EIO;
-               goto error;
-       }
-
-       if (!buf || buf->index >= VIDEO_MAX_FRAME ||
-           !pipe->outq.bufs[buf->index]) {
-               dev_err(isp->dev, "Invalid index for qbuf.\n");
-               ret = -EINVAL;
-               goto error;
-       }
-
-       if (buf->memory != V4L2_MEMORY_MMAP) {
-               dev_err(isp->dev, "Unsupported memory method\n");
-               ret = -EINVAL;
-               goto error;
-       }
-
-       if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-               dev_err(isp->dev, "Unsupported buffer type\n");
-               ret = -EINVAL;
-               goto error;
-       }
-       rt_mutex_unlock(&isp->mutex);
-
-       return videobuf_qbuf(&pipe->outq, buf);
-
-error:
-       rt_mutex_unlock(&isp->mutex);
-
-       return ret;
+       return 0;
 }
 
 static int __get_frame_exp_id(struct atomisp_video_pipe *pipe,
@@ -1529,37 +1352,21 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
        struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
        struct atomisp_sub_device *asd = pipe->asd;
        struct atomisp_device *isp = video_get_drvdata(vdev);
-       int ret = 0;
-
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
-       rt_mutex_lock(&isp->mutex);
-
-       if (isp->isp_fatal_error) {
-               rt_mutex_unlock(&isp->mutex);
-               return -EIO;
-       }
-
-       if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
-               rt_mutex_unlock(&isp->mutex);
-               dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
-                       __func__);
-               return -EIO;
-       }
+       int ret;
 
-       rt_mutex_unlock(&isp->mutex);
+       ret = atomisp_pipe_check(pipe, false);
+       if (ret)
+               return ret;
 
+       mutex_unlock(&isp->mutex);
        ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK);
+       mutex_lock(&isp->mutex);
        if (ret) {
                if (ret != -EAGAIN)
                        dev_dbg(isp->dev, "<%s: %d\n", __func__, ret);
                return ret;
        }
-       rt_mutex_lock(&isp->mutex);
+
        buf->bytesused = pipe->pix.sizeimage;
        buf->reserved = asd->frame_status[buf->index];
 
@@ -1573,7 +1380,6 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
        if (!(buf->flags & V4L2_BUF_FLAG_ERROR))
                buf->reserved |= __get_frame_exp_id(pipe, buf) << 16;
        buf->reserved2 = pipe->frame_config_id[buf->index];
-       rt_mutex_unlock(&isp->mutex);
 
        dev_dbg(isp->dev,
                "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n",
@@ -1622,16 +1428,6 @@ enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd)
 
 static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd)
 {
-       struct atomisp_device *isp = asd->isp;
-
-       if (isp->inputs[asd->input_curr].camera_caps->
-           sensor[asd->sensor_curr].stream_num > 1) {
-               if (asd->high_speed_mode)
-                       return 1;
-               else
-                       return 2;
-       }
-
        if (asd->vfpp->val != ATOMISP_VFPP_ENABLE ||
            asd->copy_mode)
                return 1;
@@ -1650,31 +1446,15 @@ static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd)
 int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp,
        bool isp_timeout)
 {
-       unsigned int master = -1, slave = -1, delay_slave = 0;
-       int i, ret;
-
-       /*
-        * ISP only support 2 streams now so ignore multiple master/slave
-        * case to reduce the delay between 2 stream_on calls.
-        */
-       for (i = 0; i < isp->num_of_streams; i++) {
-               int sensor_index = isp->asd[i].input_curr;
-
-               if (isp->inputs[sensor_index].camera_caps->
-                   sensor[isp->asd[i].sensor_curr].is_slave)
-                       slave = sensor_index;
-               else
-                       master = sensor_index;
-       }
+       unsigned int master, slave, delay_slave = 0;
+       int ret;
 
-       if (master == -1 || slave == -1) {
-               master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR;
-               slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR;
-               dev_warn(isp->dev,
-                        "depth mode use default master=%s.slave=%s.\n",
-                        isp->inputs[master].camera->name,
-                        isp->inputs[slave].camera->name);
-       }
+       master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR;
+       slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR;
+       dev_warn(isp->dev,
+                "depth mode use default master=%s.slave=%s.\n",
+                isp->inputs[master].camera->name,
+                isp->inputs[slave].camera->name);
 
        ret = v4l2_subdev_call(isp->inputs[master].camera, core,
                               ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP,
@@ -1708,51 +1488,6 @@ int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp,
        return 0;
 }
 
-/* FIXME! ISP2400 */
-static void __wdt_on_master_slave_sensor(struct atomisp_device *isp,
-                                        unsigned int wdt_duration)
-{
-       if (atomisp_buffers_queued(&isp->asd[0]))
-               atomisp_wdt_refresh(&isp->asd[0], wdt_duration);
-       if (atomisp_buffers_queued(&isp->asd[1]))
-               atomisp_wdt_refresh(&isp->asd[1], wdt_duration);
-}
-
-/* FIXME! ISP2401 */
-static void __wdt_on_master_slave_sensor_pipe(struct atomisp_video_pipe *pipe,
-                                             unsigned int wdt_duration,
-                                             bool enable)
-{
-       static struct atomisp_video_pipe *pipe0;
-
-       if (enable) {
-               if (atomisp_buffers_queued_pipe(pipe0))
-                       atomisp_wdt_refresh_pipe(pipe0, wdt_duration);
-               if (atomisp_buffers_queued_pipe(pipe))
-                       atomisp_wdt_refresh_pipe(pipe, wdt_duration);
-       } else {
-               pipe0 = pipe;
-       }
-}
-
-static void atomisp_pause_buffer_event(struct atomisp_device *isp)
-{
-       struct v4l2_event event = {0};
-       int i;
-
-       event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER;
-
-       for (i = 0; i < isp->num_of_streams; i++) {
-               int sensor_index = isp->asd[i].input_curr;
-
-               if (isp->inputs[sensor_index].camera_caps->
-                   sensor[isp->asd[i].sensor_curr].is_slave) {
-                       v4l2_event_queue(isp->asd[i].subdev.devnode, &event);
-                       break;
-               }
-       }
-}
-
 /* Input system HW workaround */
 /* Input system address translation corrupts burst during */
 /* invalidate. SW workaround for this is to set burst length */
@@ -1784,15 +1519,8 @@ static int atomisp_streamon(struct file *file, void *fh,
        struct pci_dev *pdev = to_pci_dev(isp->dev);
        enum ia_css_pipe_id css_pipe_id;
        unsigned int sensor_start_stream;
-       unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION;
-       int ret = 0;
        unsigned long irqflags;
-
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
+       int ret;
 
        dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n",
                atomisp_subdev_source_pad(vdev), asd->index);
@@ -1802,19 +1530,12 @@ static int atomisp_streamon(struct file *file, void *fh,
                return -EINVAL;
        }
 
-       rt_mutex_lock(&isp->mutex);
-       if (isp->isp_fatal_error) {
-               ret = -EIO;
-               goto out;
-       }
-
-       if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
-               ret = -EBUSY;
-               goto out;
-       }
+       ret = atomisp_pipe_check(pipe, false);
+       if (ret)
+               return ret;
 
        if (pipe->capq.streaming)
-               goto out;
+               return 0;
 
        /* Input system HW workaround */
        atomisp_dma_burst_len_cfg(asd);
@@ -1829,20 +1550,18 @@ static int atomisp_streamon(struct file *file, void *fh,
        if (list_empty(&pipe->capq.stream)) {
                spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
                dev_dbg(isp->dev, "no buffer in the queue\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
        spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
 
        ret = videobuf_streamon(&pipe->capq);
        if (ret)
-               goto out;
+               return ret;
 
        /* Reset pending capture request count. */
        asd->pending_capture_request = 0;
 
-       if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) &&
-           (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) {
+       if (atomisp_subdev_streaming_count(asd) > sensor_start_stream) {
                /* trigger still capture */
                if (asd->continuous_mode->val &&
                    atomisp_subdev_source_pad(vdev)
@@ -1856,11 +1575,11 @@ static int atomisp_streamon(struct file *file, void *fh,
 
                        if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
                                flush_work(&asd->delayed_init_work);
-                               rt_mutex_unlock(&isp->mutex);
-                               if (wait_for_completion_interruptible(
-                                       &asd->init_done) != 0)
+                               mutex_unlock(&isp->mutex);
+                               ret = wait_for_completion_interruptible(&asd->init_done);
+                               mutex_lock(&isp->mutex);
+                               if (ret != 0)
                                        return -ERESTARTSYS;
-                               rt_mutex_lock(&isp->mutex);
                        }
 
                        /* handle per_frame_setting parameter and buffers */
@@ -1882,16 +1601,12 @@ static int atomisp_streamon(struct file *file, void *fh,
                                        asd->params.offline_parm.num_captures,
                                        asd->params.offline_parm.skip_frames,
                                        asd->params.offline_parm.offset);
-                               if (ret) {
-                                       ret = -EINVAL;
-                                       goto out;
-                               }
-                               if (asd->depth_mode->val)
-                                       atomisp_pause_buffer_event(isp);
+                               if (ret)
+                                       return -EINVAL;
                        }
                }
                atomisp_qbuffers_to_css(asd);
-               goto out;
+               return 0;
        }
 
        if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
@@ -1917,14 +1632,14 @@ static int atomisp_streamon(struct file *file, void *fh,
 
        ret = atomisp_css_start(asd, css_pipe_id, false);
        if (ret)
-               goto out;
+               return ret;
 
+       spin_lock_irqsave(&isp->lock, irqflags);
        asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+       spin_unlock_irqrestore(&isp->lock, irqflags);
        atomic_set(&asd->sof_count, -1);
        atomic_set(&asd->sequence, -1);
        atomic_set(&asd->sequence_temp, -1);
-       if (isp->sw_contex.file_input)
-               wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION;
 
        asd->params.dis_proj_data_valid = false;
        asd->latest_preview_exp_id = 0;
@@ -1938,7 +1653,7 @@ static int atomisp_streamon(struct file *file, void *fh,
 
        /* Only start sensor when the last streaming instance started */
        if (atomisp_subdev_streaming_count(asd) < sensor_start_stream)
-               goto out;
+               return 0;
 
 start_sensor:
        if (isp->flash) {
@@ -1947,26 +1662,21 @@ start_sensor:
                atomisp_setup_flash(asd);
        }
 
-       if (!isp->sw_contex.file_input) {
-               atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
-                                      atomisp_css_valid_sof(isp));
-               atomisp_csi2_configure(asd);
-               /*
-                * set freq to max when streaming count > 1 which indicate
-                * dual camera would run
-                */
-               if (atomisp_streaming_count(isp) > 1) {
-                       if (atomisp_freq_scaling(isp,
-                                                ATOMISP_DFS_MODE_MAX, false) < 0)
-                               dev_dbg(isp->dev, "DFS max mode failed!\n");
-               } else {
-                       if (atomisp_freq_scaling(isp,
-                                                ATOMISP_DFS_MODE_AUTO, false) < 0)
-                               dev_dbg(isp->dev, "DFS auto mode failed!\n");
-               }
-       } else {
-               if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0)
+       atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+                              atomisp_css_valid_sof(isp));
+       atomisp_csi2_configure(asd);
+       /*
+        * set freq to max when streaming count > 1, which indicates
+        * dual cameras would run
+        */
+       if (atomisp_streaming_count(isp) > 1) {
+               if (atomisp_freq_scaling(isp,
+                                        ATOMISP_DFS_MODE_MAX, false) < 0)
                        dev_dbg(isp->dev, "DFS max mode failed!\n");
+       } else {
+               if (atomisp_freq_scaling(isp,
+                                        ATOMISP_DFS_MODE_AUTO, false) < 0)
+                       dev_dbg(isp->dev, "DFS auto mode failed!\n");
        }
 
        if (asd->depth_mode->val && atomisp_streaming_count(isp) ==
@@ -1974,17 +1684,11 @@ start_sensor:
                ret = atomisp_stream_on_master_slave_sensor(isp, false);
                if (ret) {
                        dev_err(isp->dev, "master slave sensor stream on failed!\n");
-                       goto out;
+                       return ret;
                }
-               if (!IS_ISP2401)
-                       __wdt_on_master_slave_sensor(isp, wdt_duration);
-               else
-                       __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, true);
                goto start_delay_wq;
        } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) <
                                            ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
-               if (IS_ISP2401)
-                       __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, false);
                goto start_delay_wq;
        }
 
@@ -1999,41 +1703,29 @@ start_sensor:
        ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
                               video, s_stream, 1);
        if (ret) {
+               spin_lock_irqsave(&isp->lock, irqflags);
                asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (!IS_ISP2401) {
-               if (atomisp_buffers_queued(asd))
-                       atomisp_wdt_refresh(asd, wdt_duration);
-       } else {
-               if (atomisp_buffers_queued_pipe(pipe))
-                       atomisp_wdt_refresh_pipe(pipe, wdt_duration);
+               spin_unlock_irqrestore(&isp->lock, irqflags);
+               return -EINVAL;
        }
 
 start_delay_wq:
        if (asd->continuous_mode->val) {
-               struct v4l2_mbus_framefmt *sink;
-
-               sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
-                                              V4L2_SUBDEV_FORMAT_ACTIVE,
-                                              ATOMISP_SUBDEV_PAD_SINK);
+               atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+                                       V4L2_SUBDEV_FORMAT_ACTIVE,
+                                       ATOMISP_SUBDEV_PAD_SINK);
 
                reinit_completion(&asd->init_done);
                asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED;
                queue_work(asd->delayed_init_workq, &asd->delayed_init_work);
-               atomisp_css_set_cont_prev_start_time(isp,
-                                                    ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height));
        } else {
                asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
        }
-out:
-       rt_mutex_unlock(&isp->mutex);
-       return ret;
+
+       return 0;
 }
 
-int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
@@ -2050,17 +1742,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        unsigned long flags;
        bool first_streamoff = false;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
                atomisp_subdev_source_pad(vdev), asd->index);
 
        lockdep_assert_held(&isp->mutex);
-       lockdep_assert_held(&isp->streamoff_mutex);
 
        if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
@@ -2071,17 +1756,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
         * do only videobuf_streamoff for capture & vf pipes in
         * case of continuous capture
         */
-       if ((asd->continuous_mode->val ||
-            isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) &&
-           atomisp_subdev_source_pad(vdev) !=
-           ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
-           atomisp_subdev_source_pad(vdev) !=
-           ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
-               if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) {
-                       v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
-                                        video, s_stream, 0);
-               } else if (atomisp_subdev_source_pad(vdev)
-                          == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+       if (asd->continuous_mode->val &&
+           atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+           atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+               if (atomisp_subdev_source_pad(vdev) == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
                        /* stop continuous still capture if needed */
                        if (asd->params.offline_parm.num_captures == -1)
                                atomisp_css_offline_capture_configure(asd,
@@ -2118,32 +1796,14 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        if (!pipe->capq.streaming)
                return 0;
 
-       spin_lock_irqsave(&isp->lock, flags);
-       if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
-               asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+       if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED)
                first_streamoff = true;
-       }
-       spin_unlock_irqrestore(&isp->lock, flags);
-
-       if (first_streamoff) {
-               /* if other streams are running, should not disable watch dog */
-               rt_mutex_unlock(&isp->mutex);
-               atomisp_wdt_stop(asd, true);
-
-               /*
-                * must stop sending pixels into GP_FIFO before stop
-                * the pipeline.
-                */
-               if (isp->sw_contex.file_input)
-                       v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
-                                        video, s_stream, 0);
-
-               rt_mutex_lock(&isp->mutex);
-       }
 
        spin_lock_irqsave(&isp->lock, flags);
        if (atomisp_subdev_streaming_count(asd) == 1)
                asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+       else
+               asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
        spin_unlock_irqrestore(&isp->lock, flags);
 
        if (!first_streamoff) {
@@ -2154,19 +1814,16 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        }
 
        atomisp_clear_css_buffer_counters(asd);
-
-       if (!isp->sw_contex.file_input)
-               atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
-                                      false);
+       atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
 
        if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
                cancel_work_sync(&asd->delayed_init_work);
                asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
        }
-       if (first_streamoff) {
-               css_pipe_id = atomisp_get_css_pipe_id(asd);
-               atomisp_css_stop(asd, css_pipe_id, false);
-       }
+
+       css_pipe_id = atomisp_get_css_pipe_id(asd);
+       atomisp_css_stop(asd, css_pipe_id, false);
+
        /* cancel work queue*/
        if (asd->video_out_capture.users) {
                capture_pipe = &asd->video_out_capture;
@@ -2210,9 +1867,8 @@ stopsensor:
            != atomisp_sensor_start_stream(asd))
                return 0;
 
-       if (!isp->sw_contex.file_input)
-               ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
-                                      video, s_stream, 0);
+       ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+                              video, s_stream, 0);
 
        if (isp->flash) {
                asd->params.num_flash_frames = 0;
@@ -2284,22 +1940,6 @@ stopsensor:
        return ret;
 }
 
-static int atomisp_streamoff(struct file *file, void *fh,
-                            enum v4l2_buf_type type)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-       int rval;
-
-       mutex_lock(&isp->streamoff_mutex);
-       rt_mutex_lock(&isp->mutex);
-       rval = __atomisp_streamoff(file, fh, type);
-       rt_mutex_unlock(&isp->mutex);
-       mutex_unlock(&isp->streamoff_mutex);
-
-       return rval;
-}
-
 /*
  * To get the current value of a control.
  * applications initialize the id field of a struct v4l2_control and
@@ -2313,12 +1953,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh,
        struct atomisp_device *isp = video_get_drvdata(vdev);
        int i, ret = -EINVAL;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        for (i = 0; i < ctrls_num; i++) {
                if (ci_v4l2_controls[i].id == control->id) {
                        ret = 0;
@@ -2329,8 +1963,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh,
        if (ret)
                return ret;
 
-       rt_mutex_lock(&isp->mutex);
-
        switch (control->id) {
        case V4L2_CID_IRIS_ABSOLUTE:
        case V4L2_CID_EXPOSURE_ABSOLUTE:
@@ -2352,7 +1984,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh,
        case V4L2_CID_TEST_PATTERN_COLOR_GR:
        case V4L2_CID_TEST_PATTERN_COLOR_GB:
        case V4L2_CID_TEST_PATTERN_COLOR_B:
-               rt_mutex_unlock(&isp->mutex);
                return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
                                   ctrl_handler, control);
        case V4L2_CID_COLORFX:
@@ -2381,7 +2012,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh,
                break;
        }
 
-       rt_mutex_unlock(&isp->mutex);
        return ret;
 }
 
@@ -2398,12 +2028,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
        struct atomisp_device *isp = video_get_drvdata(vdev);
        int i, ret = -EINVAL;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        for (i = 0; i < ctrls_num; i++) {
                if (ci_v4l2_controls[i].id == control->id) {
                        ret = 0;
@@ -2414,7 +2038,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
        if (ret)
                return ret;
 
-       rt_mutex_lock(&isp->mutex);
        switch (control->id) {
        case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
        case V4L2_CID_EXPOSURE:
@@ -2435,7 +2058,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
        case V4L2_CID_TEST_PATTERN_COLOR_GR:
        case V4L2_CID_TEST_PATTERN_COLOR_GB:
        case V4L2_CID_TEST_PATTERN_COLOR_B:
-               rt_mutex_unlock(&isp->mutex);
                return v4l2_s_ctrl(NULL,
                                   isp->inputs[asd->input_curr].camera->
                                   ctrl_handler, control);
@@ -2467,7 +2089,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
                ret = -EINVAL;
                break;
        }
-       rt_mutex_unlock(&isp->mutex);
        return ret;
 }
 
@@ -2485,12 +2106,6 @@ static int atomisp_queryctl(struct file *file, void *fh,
        struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
        struct atomisp_device *isp = video_get_drvdata(vdev);
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        switch (qc->id) {
        case V4L2_CID_FOCUS_ABSOLUTE:
        case V4L2_CID_FOCUS_RELATIVE:
@@ -2536,12 +2151,6 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
        int i;
        int ret = 0;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        if (!IS_ISP2401)
                motor = isp->inputs[asd->input_curr].motor;
        else
@@ -2592,9 +2201,7 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
                                                &ctrl);
                        break;
                case V4L2_CID_ZOOM_ABSOLUTE:
-                       rt_mutex_lock(&isp->mutex);
                        ret = atomisp_digital_zoom(asd, 0, &ctrl.value);
-                       rt_mutex_unlock(&isp->mutex);
                        break;
                case V4L2_CID_G_SKIP_FRAMES:
                        ret = v4l2_subdev_call(
@@ -2653,12 +2260,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
        int i;
        int ret = 0;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        if (!IS_ISP2401)
                motor = isp->inputs[asd->input_curr].motor;
        else
@@ -2707,7 +2308,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
                case V4L2_CID_FLASH_STROBE:
                case V4L2_CID_FLASH_MODE:
                case V4L2_CID_FLASH_STATUS_REGISTER:
-                       rt_mutex_lock(&isp->mutex);
                        if (isp->flash) {
                                ret =
                                    v4l2_s_ctrl(NULL, isp->flash->ctrl_handler,
@@ -2722,12 +2322,9 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
                                        asd->params.num_flash_frames = 0;
                                }
                        }
-                       rt_mutex_unlock(&isp->mutex);
                        break;
                case V4L2_CID_ZOOM_ABSOLUTE:
-                       rt_mutex_lock(&isp->mutex);
                        ret = atomisp_digital_zoom(asd, 1, &ctrl.value);
-                       rt_mutex_unlock(&isp->mutex);
                        break;
                default:
                        ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id);
@@ -2784,20 +2381,12 @@ static int atomisp_g_parm(struct file *file, void *fh,
        struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
        struct atomisp_device *isp = video_get_drvdata(vdev);
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                dev_err(isp->dev, "unsupported v4l2 buf type\n");
                return -EINVAL;
        }
 
-       rt_mutex_lock(&isp->mutex);
        parm->parm.capture.capturemode = asd->run_mode->val;
-       rt_mutex_unlock(&isp->mutex);
 
        return 0;
 }
@@ -2812,19 +2401,11 @@ static int atomisp_s_parm(struct file *file, void *fh,
        int rval;
        int fps;
 
-       if (!asd) {
-               dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
-                       __func__, vdev->name);
-               return -EINVAL;
-       }
-
        if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                dev_err(isp->dev, "unsupported v4l2 buf type\n");
                return -EINVAL;
        }
 
-       rt_mutex_lock(&isp->mutex);
-
        asd->high_speed_mode = false;
        switch (parm->parm.capture.capturemode) {
        case CI_MODE_NONE: {
@@ -2843,7 +2424,7 @@ static int atomisp_s_parm(struct file *file, void *fh,
                                asd->high_speed_mode = true;
                }
 
-               goto out;
+               return rval == -ENOIOCTLCMD ? 0 : rval;
        }
        case CI_MODE_VIDEO:
                mode = ATOMISP_RUN_MODE_VIDEO;
@@ -2858,76 +2439,29 @@ static int atomisp_s_parm(struct file *file, void *fh,
                mode = ATOMISP_RUN_MODE_PREVIEW;
                break;
        default:
-               rval = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode);
 
-out:
-       rt_mutex_unlock(&isp->mutex);
-
        return rval == -ENOIOCTLCMD ? 0 : rval;
 }
 
-static int atomisp_s_parm_file(struct file *file, void *fh,
-                              struct v4l2_streamparm *parm)
-{
-       struct video_device *vdev = video_devdata(file);
-       struct atomisp_device *isp = video_get_drvdata(vdev);
-
-       if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-               dev_err(isp->dev, "unsupported v4l2 buf type for output\n");
-               return -EINVAL;
-       }
-
-       rt_mutex_lock(&isp->mutex);
-       isp->sw_contex.file_input = true;
-       rt_mutex_unlock(&isp->mutex);
-
-       return 0;
-}
-
 static long atomisp_vidioc_default(struct file *file, void *fh,
                                   bool valid_prio, unsigned int cmd, void *arg)
 {
        struct video_device *vdev = video_devdata(file);
        struct atomisp_device *isp = video_get_drvdata(vdev);
-       struct atomisp_sub_device *asd;
+       struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
        struct v4l2_subdev *motor;
-       bool acc_node;
        int err;
 
-       acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
-       if (acc_node)
-               asd = atomisp_to_acc_pipe(vdev)->asd;
-       else
-               asd = atomisp_to_video_pipe(vdev)->asd;
-
        if (!IS_ISP2401)
                motor = isp->inputs[asd->input_curr].motor;
        else
                motor = isp->motor;
 
        switch (cmd) {
-       case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
-       case ATOMISP_IOC_S_EXPOSURE:
-       case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
-       case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
-       case ATOMISP_IOC_EXT_ISP_CTRL:
-       case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
-       case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
-       case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
-       case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
-       case ATOMISP_IOC_S_SENSOR_EE_CONFIG:
-       case ATOMISP_IOC_G_UPDATE_EXPOSURE:
-               /* we do not need take isp->mutex for these IOCTLs */
-               break;
-       default:
-               rt_mutex_lock(&isp->mutex);
-               break;
-       }
-       switch (cmd) {
        case ATOMISP_IOC_S_SENSOR_RUNMODE:
                if (IS_ISP2401)
                        err = atomisp_set_sensor_runmode(asd, arg);
@@ -3173,22 +2707,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
                break;
        }
 
-       switch (cmd) {
-       case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
-       case ATOMISP_IOC_S_EXPOSURE:
-       case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
-       case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
-       case ATOMISP_IOC_EXT_ISP_CTRL:
-       case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
-       case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
-       case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
-       case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
-       case ATOMISP_IOC_G_UPDATE_EXPOSURE:
-               break;
-       default:
-               rt_mutex_unlock(&isp->mutex);
-               break;
-       }
        return err;
 }
 
@@ -3207,7 +2725,7 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
        .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
        .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
        .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
-       .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap,
+       .vidioc_s_fmt_vid_cap = atomisp_set_fmt,
        .vidioc_reqbufs = atomisp_reqbufs,
        .vidioc_querybuf = atomisp_querybuf,
        .vidioc_qbuf = atomisp_qbuf,
@@ -3218,13 +2736,3 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
        .vidioc_s_parm = atomisp_s_parm,
        .vidioc_g_parm = atomisp_g_parm,
 };
-
-const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = {
-       .vidioc_querycap = atomisp_querycap,
-       .vidioc_g_fmt_vid_out = atomisp_g_fmt_file,
-       .vidioc_s_fmt_vid_out = atomisp_s_fmt_file,
-       .vidioc_s_parm = atomisp_s_parm_file,
-       .vidioc_reqbufs = atomisp_reqbufs_file,
-       .vidioc_querybuf = atomisp_querybuf_file,
-       .vidioc_qbuf = atomisp_qbuf_file,
-};
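
Editor's note: the rt_mutex_lock()/rt_mutex_unlock() pairs deleted in the ioctl handlers above appear to be replaced by v4l2-core serialization — a later hunk in this series adds "video->vdev.lock = &video->isp->mutex", and when a video_device has .lock set the core takes that mutex around each ioctl. A minimal sketch of that pattern, assuming the standard video_ioctl2() file-operations plumbing; the my_* names are illustrative only and the v4l2_device/fops setup is omitted:

    #include <linux/mutex.h>
    #include <media/v4l2-dev.h>
    #include <media/v4l2-ioctl.h>

    /* Hypothetical driver state; names are illustrative only. */
    struct my_dev {
            struct mutex lock;              /* serializes all ioctls */
            struct video_device vdev;
            int run_mode;
    };

    static int my_g_parm(struct file *file, void *fh,
                         struct v4l2_streamparm *parm)
    {
            struct video_device *vdev = video_devdata(file);
            struct my_dev *dev = container_of(vdev, struct my_dev, vdev);

            /* No explicit locking: the v4l2 core already holds dev->lock. */
            parm->parm.capture.capturemode = dev->run_mode;
            return 0;
    }

    static const struct v4l2_ioctl_ops my_ioctl_ops = {
            .vidioc_g_parm = my_g_parm,
    };

    static int my_register(struct my_dev *dev)
    {
            mutex_init(&dev->lock);
            dev->vdev.lock = &dev->lock;    /* core serializes ioctls for us */
            dev->vdev.ioctl_ops = &my_ioctl_ops;
            return video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1);
    }
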
index d85e0d6..c660f63 100644 (file)
@@ -34,27 +34,21 @@ atomisp_format_bridge *atomisp_get_format_bridge(unsigned int pixelformat);
 const struct
 atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32 mbus_code);
 
+int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool streaming_ok);
+
 int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
                                uint16_t stream_id);
 
-int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type);
-int __atomisp_reqbufs(struct file *file, void *fh,
-                     struct v4l2_requestbuffers *req);
-
-int atomisp_reqbufs(struct file *file, void *fh,
-                   struct v4l2_requestbuffers *req);
+int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type);
+int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req);
 
 enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device
        *asd);
 
 void atomisp_videobuf_free_buf(struct videobuf_buffer *vb);
 
-extern const struct v4l2_file_operations atomisp_file_fops;
-
 extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
 
-extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops;
-
 unsigned int atomisp_streaming_count(struct atomisp_device *isp);
 
 /* compat_ioctl for 32bit userland app and 64bit kernel */
index 394fe69..847dfee 100644 (file)
@@ -373,16 +373,12 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
        struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
        struct atomisp_device *isp = isp_sd->isp;
        struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
-       u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
        struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
                       *comp[ATOMISP_SUBDEV_PADS_NUM];
-       enum atomisp_input_stream_id stream_id;
        unsigned int i;
        unsigned int padding_w = pad_w;
        unsigned int padding_h = pad_h;
 
-       stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
-
        isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
 
        dev_dbg(isp->dev,
@@ -478,9 +474,10 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
                        dvs_w = dvs_h = 0;
                }
                atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
-               atomisp_css_input_set_effective_resolution(isp_sd, stream_id,
-                       crop[pad]->width, crop[pad]->height);
-
+               atomisp_css_input_set_effective_resolution(isp_sd,
+                                                          ATOMISP_INPUT_STREAM_GENERAL,
+                                                          crop[pad]->width,
+                                                          crop[pad]->height);
                break;
        }
        case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
@@ -523,14 +520,14 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
                if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height <
                    crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height)
                        atomisp_css_input_set_effective_resolution(isp_sd,
-                               stream_id,
+                               ATOMISP_INPUT_STREAM_GENERAL,
                                rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
                                          height * r->width / r->height,
                                          ATOM_ISP_STEP_WIDTH),
                                crop[ATOMISP_SUBDEV_PAD_SINK]->height);
                else
                        atomisp_css_input_set_effective_resolution(isp_sd,
-                               stream_id,
+                               ATOMISP_INPUT_STREAM_GENERAL,
                                crop[ATOMISP_SUBDEV_PAD_SINK]->width,
                                rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
                                          width * r->height / r->width,
@@ -620,16 +617,12 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
        struct atomisp_device *isp = isp_sd->isp;
        struct v4l2_mbus_framefmt *__ffmt =
            atomisp_subdev_get_ffmt(sd, sd_state, which, pad);
-       u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
-       enum atomisp_input_stream_id stream_id;
 
        dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
                atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
                which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
                : "V4L2_SUBDEV_FORMAT_ACTIVE");
 
-       stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
-
        switch (pad) {
        case ATOMISP_SUBDEV_PAD_SINK: {
                const struct atomisp_in_fmt_conv *fc =
@@ -649,15 +642,15 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
 
                if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
                        atomisp_css_input_set_resolution(isp_sd,
-                                                        stream_id, ffmt);
+                                                        ATOMISP_INPUT_STREAM_GENERAL, ffmt);
                        atomisp_css_input_set_binning_factor(isp_sd,
-                                                            stream_id,
+                                                            ATOMISP_INPUT_STREAM_GENERAL,
                                                             atomisp_get_sensor_bin_factor(isp_sd));
-                       atomisp_css_input_set_bayer_order(isp_sd, stream_id,
+                       atomisp_css_input_set_bayer_order(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
                                                          fc->bayer_order);
-                       atomisp_css_input_set_format(isp_sd, stream_id,
+                       atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
                                                     fc->atomisp_in_fmt);
-                       atomisp_css_set_default_isys_config(isp_sd, stream_id,
+                       atomisp_css_set_default_isys_config(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
                                                            ffmt);
                }
 
@@ -874,12 +867,18 @@ static int s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct atomisp_sub_device *asd = container_of(
                                             ctrl->handler, struct atomisp_sub_device, ctrl_handler);
+       unsigned int streaming;
+       unsigned long flags;
 
        switch (ctrl->id) {
        case V4L2_CID_RUN_MODE:
                return __atomisp_update_run_mode(asd);
        case V4L2_CID_DEPTH_MODE:
-               if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+               /* Use spinlock instead of mutex to avoid possible locking issues */
+               spin_lock_irqsave(&asd->isp->lock, flags);
+               streaming = asd->streaming;
+               spin_unlock_irqrestore(&asd->isp->lock, flags);
+               if (streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
                        dev_err(asd->isp->dev,
                                "ISP is streaming, it is not supported to change the depth mode\n");
                        return -EINVAL;
@@ -1066,7 +1065,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
        pipe->isp = asd->isp;
        spin_lock_init(&pipe->irq_lock);
        INIT_LIST_HEAD(&pipe->activeq);
-       INIT_LIST_HEAD(&pipe->activeq_out);
        INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
        INIT_LIST_HEAD(&pipe->per_frame_params);
        memset(pipe->frame_request_config_id,
@@ -1076,13 +1074,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
               sizeof(struct atomisp_css_params_with_list *));
 }
 
-static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd,
-                                 struct atomisp_acc_pipe *pipe)
-{
-       pipe->asd = asd;
-       pipe->isp = asd->isp;
-}
-
 /*
  * isp_subdev_init_entities - Initialize V4L2 subdev and media entity
  * @asd: ISP CCDC module
@@ -1126,9 +1117,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
        if (ret < 0)
                return ret;
 
-       atomisp_init_subdev_pipe(asd, &asd->video_in,
-                                V4L2_BUF_TYPE_VIDEO_OUTPUT);
-
        atomisp_init_subdev_pipe(asd, &asd->video_out_preview,
                                 V4L2_BUF_TYPE_VIDEO_CAPTURE);
 
@@ -1141,13 +1129,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
        atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture,
                                 V4L2_BUF_TYPE_VIDEO_CAPTURE);
 
-       atomisp_init_acc_pipe(asd, &asd->video_acc);
-
-       ret = atomisp_video_init(&asd->video_in, "MEMORY",
-                                ATOMISP_RUN_MODE_SDV);
-       if (ret < 0)
-               return ret;
-
        ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE",
                                 ATOMISP_RUN_MODE_STILL_CAPTURE);
        if (ret < 0)
@@ -1168,8 +1149,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
        if (ret < 0)
                return ret;
 
-       atomisp_acc_init(&asd->video_acc, "ACC");
-
        ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1);
        if (ret)
                return ret;
@@ -1226,7 +1205,11 @@ int atomisp_create_pads_links(struct atomisp_device *isp)
                                return ret;
                }
        }
-       for (i = 0; i < isp->input_cnt - 2; i++) {
+       for (i = 0; i < isp->input_cnt; i++) {
+               /* Don't create links for the test-pattern-generator */
+               if (isp->inputs[i].type == TEST_PATTERN)
+                       continue;
+
                ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0,
                                            &isp->csi2_port[isp->inputs[i].
                                                    port].subdev.entity,
@@ -1262,17 +1245,6 @@ int atomisp_create_pads_links(struct atomisp_device *isp)
                                            entity, 0, 0);
                if (ret < 0)
                        return ret;
-               /*
-                * file input only supported on subdev0
-                * so do not create pad link for subdevs other then subdev0
-                */
-               if (asd->index)
-                       return 0;
-               ret = media_create_pad_link(&asd->video_in.vdev.entity,
-                                           0, &asd->subdev.entity,
-                                           ATOMISP_SUBDEV_PAD_SINK, 0);
-               if (ret < 0)
-                       return ret;
        }
        return 0;
 }
@@ -1302,87 +1274,55 @@ void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
 {
        atomisp_subdev_cleanup_entities(asd);
        v4l2_device_unregister_subdev(&asd->subdev);
-       atomisp_video_unregister(&asd->video_in);
        atomisp_video_unregister(&asd->video_out_preview);
        atomisp_video_unregister(&asd->video_out_vf);
        atomisp_video_unregister(&asd->video_out_capture);
        atomisp_video_unregister(&asd->video_out_video_capture);
-       atomisp_acc_unregister(&asd->video_acc);
 }
 
-int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
-                                    struct v4l2_device *vdev)
+int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
+                                  struct v4l2_device *vdev)
+{
+       return v4l2_device_register_subdev(vdev, &asd->subdev);
+}
+
+int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd,
+                                       struct v4l2_device *vdev)
 {
        int ret;
-       u32 device_caps;
 
        /*
         * FIXME: check if all device caps are properly initialized.
-        * Should any of those use V4L2_CAP_META_OUTPUT? Probably yes.
+        * Should any of those use V4L2_CAP_META_CAPTURE? Probably yes.
         */
 
-       device_caps = V4L2_CAP_VIDEO_CAPTURE |
-                     V4L2_CAP_STREAMING;
-
-       /* Register the subdev and video node. */
-
-       ret = v4l2_device_register_subdev(vdev, &asd->subdev);
-       if (ret < 0)
-               goto error;
-
        asd->video_out_preview.vdev.v4l2_dev = vdev;
-       asd->video_out_preview.vdev.device_caps = device_caps |
-                                                 V4L2_CAP_VIDEO_OUTPUT;
+       asd->video_out_preview.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        ret = video_register_device(&asd->video_out_preview.vdev,
                                    VFL_TYPE_VIDEO, -1);
        if (ret < 0)
                goto error;
 
        asd->video_out_capture.vdev.v4l2_dev = vdev;
-       asd->video_out_capture.vdev.device_caps = device_caps |
-                                                 V4L2_CAP_VIDEO_OUTPUT;
+       asd->video_out_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        ret = video_register_device(&asd->video_out_capture.vdev,
                                    VFL_TYPE_VIDEO, -1);
        if (ret < 0)
                goto error;
 
        asd->video_out_vf.vdev.v4l2_dev = vdev;
-       asd->video_out_vf.vdev.device_caps = device_caps |
-                                            V4L2_CAP_VIDEO_OUTPUT;
+       asd->video_out_vf.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        ret = video_register_device(&asd->video_out_vf.vdev,
                                    VFL_TYPE_VIDEO, -1);
        if (ret < 0)
                goto error;
 
        asd->video_out_video_capture.vdev.v4l2_dev = vdev;
-       asd->video_out_video_capture.vdev.device_caps = device_caps |
-                                                       V4L2_CAP_VIDEO_OUTPUT;
+       asd->video_out_video_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        ret = video_register_device(&asd->video_out_video_capture.vdev,
                                    VFL_TYPE_VIDEO, -1);
        if (ret < 0)
                goto error;
-       asd->video_acc.vdev.v4l2_dev = vdev;
-       asd->video_acc.vdev.device_caps = device_caps |
-                                         V4L2_CAP_VIDEO_OUTPUT;
-       ret = video_register_device(&asd->video_acc.vdev,
-                                   VFL_TYPE_VIDEO, -1);
-       if (ret < 0)
-               goto error;
-
-       /*
-        * file input only supported on subdev0
-        * so do not create video node for subdevs other then subdev0
-        */
-       if (asd->index)
-               return 0;
-
-       asd->video_in.vdev.v4l2_dev = vdev;
-       asd->video_in.vdev.device_caps = device_caps |
-                                         V4L2_CAP_VIDEO_CAPTURE;
-       ret = video_register_device(&asd->video_in.vdev,
-                                   VFL_TYPE_VIDEO, -1);
-       if (ret < 0)
-               goto error;
 
        return 0;
 
@@ -1415,7 +1355,6 @@ int atomisp_subdev_init(struct atomisp_device *isp)
                return -ENOMEM;
        for (i = 0; i < isp->num_of_streams; i++) {
                asd = &isp->asd[i];
-               spin_lock_init(&asd->lock);
                asd->isp = isp;
                isp_subdev_init_params(asd);
                asd->index = i;
index 798a937..a1f4da3 100644 (file)
@@ -70,9 +70,7 @@ struct atomisp_video_pipe {
        enum v4l2_buf_type type;
        struct media_pad pad;
        struct videobuf_queue capq;
-       struct videobuf_queue outq;
        struct list_head activeq;
-       struct list_head activeq_out;
        /*
         * the buffers waiting for per-frame parameters, this is only valid
         * in per-frame setting mode.
@@ -86,9 +84,10 @@ struct atomisp_video_pipe {
 
        unsigned int buffers_in_css;
 
-       /* irq_lock is used to protect video buffer state change operations and
-        * also to make activeq, activeq_out, capq and outq list
-        * operations atomic. */
+       /*
+        * irq_lock is used to protect video buffer state change operations and
+        * also to make activeq and capq operations atomic.
+        */
        spinlock_t irq_lock;
        unsigned int users;
 
@@ -109,23 +108,6 @@ struct atomisp_video_pipe {
         */
        unsigned int frame_request_config_id[VIDEO_MAX_FRAME];
        struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME];
-
-       /*
-       * move wdt from asd struct to create wdt for each pipe
-       */
-       /* ISP2401 */
-       struct timer_list wdt;
-       unsigned int wdt_duration;      /* in jiffies */
-       unsigned long wdt_expires;
-       atomic_t wdt_count;
-};
-
-struct atomisp_acc_pipe {
-       struct video_device vdev;
-       unsigned int users;
-       bool running;
-       struct atomisp_sub_device *asd;
-       struct atomisp_device *isp;
 };
 
 struct atomisp_pad_format {
@@ -267,28 +249,6 @@ struct atomisp_css_params_with_list {
        struct list_head list;
 };
 
-struct atomisp_acc_fw {
-       struct ia_css_fw_info *fw;
-       unsigned int handle;
-       unsigned int flags;
-       unsigned int type;
-       struct {
-               size_t length;
-               unsigned long css_ptr;
-       } args[ATOMISP_ACC_NR_MEMORY];
-       struct list_head list;
-};
-
-struct atomisp_map {
-       ia_css_ptr ptr;
-       size_t length;
-       struct list_head list;
-       /* FIXME: should keep book which maps are currently used
-        * by binaries and not allow releasing those
-        * which are in use. Implement by reference counting.
-        */
-};
-
 struct atomisp_sub_device {
        struct v4l2_subdev subdev;
        struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM];
@@ -297,15 +257,12 @@ struct atomisp_sub_device {
 
        enum atomisp_subdev_input_entity input;
        unsigned int output;
-       struct atomisp_video_pipe video_in;
        struct atomisp_video_pipe video_out_capture; /* capture output */
        struct atomisp_video_pipe video_out_vf;      /* viewfinder output */
        struct atomisp_video_pipe video_out_preview; /* preview output */
-       struct atomisp_acc_pipe video_acc;
        /* video pipe main output */
        struct atomisp_video_pipe video_out_video_capture;
        /* struct isp_subdev_params params; */
-       spinlock_t lock;
        struct atomisp_device *isp;
        struct v4l2_ctrl_handler ctrl_handler;
        struct v4l2_ctrl *fmt_auto;
@@ -356,15 +313,16 @@ struct atomisp_sub_device {
 
        /* This field specifies which camera (v4l2 input) is selected. */
        int input_curr;
-       /* This field specifies which sensor is being selected when there
-          are multiple sensors connected to the same MIPI port. */
-       int sensor_curr;
 
        atomic_t sof_count;
        atomic_t sequence;      /* Sequence value that is assigned to buffer. */
        atomic_t sequence_temp;
 
-       unsigned int streaming; /* Hold both mutex and lock to change this */
+       /*
+        * Writers of streaming must hold both isp->mutex and isp->lock.
+        * Readers of streaming need to hold only one of the two locks.
+        */
+       unsigned int streaming;
        bool stream_prepared; /* whether css stream is created */
 
        /* subdev index: will be used to show which subdev is holding the
@@ -390,11 +348,6 @@ struct atomisp_sub_device {
        int raw_buffer_locked_count;
        spinlock_t raw_buffer_bitmap_lock;
 
-       /* ISP 2400 */
-       struct timer_list wdt;
-       unsigned int wdt_duration;      /* in jiffies */
-       unsigned long wdt_expires;
-
        /* ISP2401 */
        bool re_trigger_capture;
 
@@ -450,8 +403,10 @@ int atomisp_update_run_mode(struct atomisp_sub_device *asd);
 void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
 
 void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
-int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
-                                    struct v4l2_device *vdev);
+int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
+                                  struct v4l2_device *vdev);
+int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd,
+                                       struct v4l2_device *vdev);
 int atomisp_subdev_init(struct atomisp_device *isp);
 void atomisp_subdev_cleanup(struct atomisp_device *isp);
 int atomisp_create_pads_links(struct atomisp_device *isp);
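
Editor's note: the new comment on asd->streaming encodes an asymmetric convention — writers must hold both isp->mutex and the isp->lock spinlock, while readers may hold either one (the V4L2_CID_DEPTH_MODE hunk above reads it under the spinlock alone). An illustrative sketch of that convention against the driver's structs; set_streaming()/is_streaming() are hypothetical helpers, not functions from this series:

    /* Writer: hold both locks, so that either lock alone protects readers. */
    static void set_streaming(struct atomisp_device *isp,
                              struct atomisp_sub_device *asd,
                              unsigned int state)
    {
            unsigned long flags;

            lockdep_assert_held(&isp->mutex);

            spin_lock_irqsave(&isp->lock, flags);
            asd->streaming = state;
            spin_unlock_irqrestore(&isp->lock, flags);
    }

    /* Reader in atomic or ctrl context: the spinlock alone is enough. */
    static bool is_streaming(struct atomisp_device *isp,
                             struct atomisp_sub_device *asd)
    {
            unsigned long flags;
            unsigned int streaming;

            spin_lock_irqsave(&isp->lock, flags);
            streaming = asd->streaming;
            spin_unlock_irqrestore(&isp->lock, flags);

            return streaming != ATOMISP_DEVICE_STREAMING_DISABLED;
    }
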
index 643ba98..d5bb990 100644 (file)
@@ -34,7 +34,6 @@
 #include "atomisp_cmd.h"
 #include "atomisp_common.h"
 #include "atomisp_fops.h"
-#include "atomisp_file.h"
 #include "atomisp_ioctl.h"
 #include "atomisp_internal.h"
 #include "atomisp-regs.h"
@@ -442,12 +441,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
                video->pad.flags = MEDIA_PAD_FL_SINK;
                video->vdev.fops = &atomisp_fops;
                video->vdev.ioctl_ops = &atomisp_ioctl_ops;
-               break;
-       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-               direction = "input";
-               video->pad.flags = MEDIA_PAD_FL_SOURCE;
-               video->vdev.fops = &atomisp_file_fops;
-               video->vdev.ioctl_ops = &atomisp_file_ioctl_ops;
+               video->vdev.lock = &video->isp->mutex;
                break;
        default:
                return -EINVAL;
@@ -467,18 +461,6 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
        return 0;
 }
 
-void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name)
-{
-       video->vdev.fops = &atomisp_fops;
-       video->vdev.ioctl_ops = &atomisp_ioctl_ops;
-
-       /* Initialize the video device. */
-       snprintf(video->vdev.name, sizeof(video->vdev.name),
-                "ATOMISP ISP %s", name);
-       video->vdev.release = video_device_release_empty;
-       video_set_drvdata(&video->vdev, video->isp);
-}
-
 void atomisp_video_unregister(struct atomisp_video_pipe *video)
 {
        if (video_is_registered(&video->vdev)) {
@@ -487,12 +469,6 @@ void atomisp_video_unregister(struct atomisp_video_pipe *video)
        }
 }
 
-void atomisp_acc_unregister(struct atomisp_acc_pipe *video)
-{
-       if (video_is_registered(&video->vdev))
-               video_unregister_device(&video->vdev);
-}
-
 static int atomisp_save_iunit_reg(struct atomisp_device *isp)
 {
        struct pci_dev *pdev = to_pci_dev(isp->dev);
@@ -1031,7 +1007,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
                            &subdevs->v4l2_subdev.board_info;
                struct i2c_adapter *adapter =
                    i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
-               int sensor_num, i;
 
                dev_info(isp->dev, "Probing Subdev %s\n", board_info->type);
 
@@ -1090,22 +1065,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
                         * pixel_format.
                         */
                        isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
-                       isp->inputs[isp->input_cnt].camera_caps =
-                           atomisp_get_default_camera_caps();
-                       sensor_num = isp->inputs[isp->input_cnt]
-                                    .camera_caps->sensor_num;
                        isp->input_cnt++;
-                       for (i = 1; i < sensor_num; i++) {
-                               if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
-                                       dev_warn(isp->dev,
-                                                "atomisp inputs out of range\n");
-                                       break;
-                               }
-                               isp->inputs[isp->input_cnt] =
-                                   isp->inputs[isp->input_cnt - 1];
-                               isp->inputs[isp->input_cnt].sensor_index = i;
-                               isp->input_cnt++;
-                       }
                        break;
                case CAMERA_MOTOR:
                        if (isp->motor) {
@@ -1158,7 +1118,6 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
        for (i = 0; i < isp->num_of_streams; i++)
                atomisp_subdev_unregister_entities(&isp->asd[i]);
        atomisp_tpg_unregister_entities(&isp->tpg);
-       atomisp_file_input_unregister_entities(&isp->file_dev);
        for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
                atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
 
@@ -1210,13 +1169,6 @@ static int atomisp_register_entities(struct atomisp_device *isp)
                goto csi_and_subdev_probe_failed;
        }
 
-       ret =
-           atomisp_file_input_register_entities(&isp->file_dev, &isp->v4l2_dev);
-       if (ret < 0) {
-               dev_err(isp->dev, "atomisp_file_input_register_entities\n");
-               goto file_input_register_failed;
-       }
-
        ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev);
        if (ret < 0) {
                dev_err(isp->dev, "atomisp_tpg_register_entities\n");
@@ -1226,10 +1178,9 @@ static int atomisp_register_entities(struct atomisp_device *isp)
        for (i = 0; i < isp->num_of_streams; i++) {
                struct atomisp_sub_device *asd = &isp->asd[i];
 
-               ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev);
+               ret = atomisp_subdev_register_subdev(asd, &isp->v4l2_dev);
                if (ret < 0) {
-                       dev_err(isp->dev,
-                               "atomisp_subdev_register_entities fail\n");
+                       dev_err(isp->dev, "atomisp_subdev_register_subdev fail\n");
                        for (; i > 0; i--)
                                atomisp_subdev_unregister_entities(
                                    &isp->asd[i - 1]);
@@ -1267,31 +1218,17 @@ static int atomisp_register_entities(struct atomisp_device *isp)
                }
        }
 
-       dev_dbg(isp->dev,
-               "FILE_INPUT enable, camera_cnt: %d\n", isp->input_cnt);
-       isp->inputs[isp->input_cnt].type = FILE_INPUT;
-       isp->inputs[isp->input_cnt].port = -1;
-       isp->inputs[isp->input_cnt].camera_caps =
-           atomisp_get_default_camera_caps();
-       isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd;
-
        if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
                dev_dbg(isp->dev,
                        "TPG detected, camera_cnt: %d\n", isp->input_cnt);
                isp->inputs[isp->input_cnt].type = TEST_PATTERN;
                isp->inputs[isp->input_cnt].port = -1;
-               isp->inputs[isp->input_cnt].camera_caps =
-                   atomisp_get_default_camera_caps();
                isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
        } else {
                dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
        }
 
-       ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
-       if (ret < 0)
-               goto link_failed;
-
-       return media_device_register(&isp->media_dev);
+       return 0;
 
 link_failed:
        for (i = 0; i < isp->num_of_streams; i++)
@@ -1304,8 +1241,6 @@ wq_alloc_failed:
 subdev_register_failed:
        atomisp_tpg_unregister_entities(&isp->tpg);
 tpg_register_failed:
-       atomisp_file_input_unregister_entities(&isp->file_dev);
-file_input_register_failed:
        for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
                atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
 csi_and_subdev_probe_failed:
@@ -1316,6 +1251,27 @@ v4l2_device_failed:
        return ret;
 }
 
+static int atomisp_register_device_nodes(struct atomisp_device *isp)
+{
+       int i, err;
+
+       for (i = 0; i < isp->num_of_streams; i++) {
+               err = atomisp_subdev_register_video_nodes(&isp->asd[i], &isp->v4l2_dev);
+               if (err)
+                       return err;
+       }
+
+       err = atomisp_create_pads_links(isp);
+       if (err)
+               return err;
+
+       err = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+       if (err)
+               return err;
+
+       return media_device_register(&isp->media_dev);
+}
+
 static int atomisp_initialize_modules(struct atomisp_device *isp)
 {
        int ret;
@@ -1326,13 +1282,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp)
                goto error_mipi_csi2;
        }
 
-       ret = atomisp_file_input_init(isp);
-       if (ret < 0) {
-               dev_err(isp->dev,
-                       "file input device initialization failed\n");
-               goto error_file_input;
-       }
-
        ret = atomisp_tpg_init(isp);
        if (ret < 0) {
                dev_err(isp->dev, "tpg initialization failed\n");
@@ -1350,8 +1299,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp)
 error_isp_subdev:
 error_tpg:
        atomisp_tpg_cleanup(isp);
-error_file_input:
-       atomisp_file_input_cleanup(isp);
 error_mipi_csi2:
        atomisp_mipi_csi2_cleanup(isp);
        return ret;
@@ -1360,7 +1307,6 @@ error_mipi_csi2:
 static void atomisp_uninitialize_modules(struct atomisp_device *isp)
 {
        atomisp_tpg_cleanup(isp);
-       atomisp_file_input_cleanup(isp);
        atomisp_mipi_csi2_cleanup(isp);
 }
 
@@ -1470,39 +1416,6 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id
        return true;
 }
 
-static int init_atomisp_wdts(struct atomisp_device *isp)
-{
-       int i, err;
-
-       atomic_set(&isp->wdt_work_queued, 0);
-       isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
-       if (!isp->wdt_work_queue) {
-               dev_err(isp->dev, "Failed to initialize wdt work queue\n");
-               err = -ENOMEM;
-               goto alloc_fail;
-       }
-       INIT_WORK(&isp->wdt_work, atomisp_wdt_work);
-
-       for (i = 0; i < isp->num_of_streams; i++) {
-               struct atomisp_sub_device *asd = &isp->asd[i];
-
-               if (!IS_ISP2401) {
-                       timer_setup(&asd->wdt, atomisp_wdt, 0);
-               } else {
-                       timer_setup(&asd->video_out_capture.wdt,
-                                   atomisp_wdt, 0);
-                       timer_setup(&asd->video_out_preview.wdt,
-                                   atomisp_wdt, 0);
-                       timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0);
-                       timer_setup(&asd->video_out_video_capture.wdt,
-                                   atomisp_wdt, 0);
-               }
-       }
-       return 0;
-alloc_fail:
-       return err;
-}
-
 #define ATOM_ISP_PCI_BAR       0
 
 static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1551,9 +1464,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 
        dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);
 
-       rt_mutex_init(&isp->mutex);
-       rt_mutex_init(&isp->loading);
-       mutex_init(&isp->streamoff_mutex);
+       mutex_init(&isp->mutex);
        spin_lock_init(&isp->lock);
 
        /* This is not a true PCI device on SoC, so the delay is not needed. */
@@ -1725,8 +1636,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
                pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim);
        }
 
-       rt_mutex_lock(&isp->loading);
-
        err = atomisp_initialize_modules(isp);
        if (err < 0) {
                dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
@@ -1738,13 +1647,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
                dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
                goto register_entities_fail;
        }
-       err = atomisp_create_pads_links(isp);
-       if (err < 0)
-               goto register_entities_fail;
-       /* init atomisp wdts */
-       err = init_atomisp_wdts(isp);
-       if (err != 0)
-               goto wdt_work_queue_fail;
+
+       INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work);
 
        /* save the iunit context only once after all the values are init'ed. */
        atomisp_save_iunit_reg(isp);
@@ -1777,8 +1681,10 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
        release_firmware(isp->firmware);
        isp->firmware = NULL;
        isp->css_env.isp_css_fw.data = NULL;
-       isp->ready = true;
-       rt_mutex_unlock(&isp->loading);
+
+       err = atomisp_register_device_nodes(isp);
+       if (err)
+               goto css_init_fail;
 
        atomisp_drvfs_init(isp);
 
@@ -1789,13 +1695,10 @@ css_init_fail:
 request_irq_fail:
        hmm_cleanup();
        pm_runtime_get_noresume(&pdev->dev);
-       destroy_workqueue(isp->wdt_work_queue);
-wdt_work_queue_fail:
        atomisp_unregister_entities(isp);
 register_entities_fail:
        atomisp_uninitialize_modules(isp);
 initialize_modules_fail:
-       rt_mutex_unlock(&isp->loading);
        cpu_latency_qos_remove_request(&isp->pm_qos);
        atomisp_msi_irq_uninit(isp);
        pci_free_irq_vectors(pdev);
@@ -1851,9 +1754,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev)
        atomisp_msi_irq_uninit(isp);
        atomisp_unregister_entities(isp);
 
-       destroy_workqueue(isp->wdt_work_queue);
-       atomisp_file_input_cleanup(isp);
-
        release_firmware(isp->firmware);
 }
 
index 72611b8..ccf1c0a 100644 (file)
 #define __ATOMISP_V4L2_H__
 
 struct atomisp_video_pipe;
-struct atomisp_acc_pipe;
 struct v4l2_device;
 struct atomisp_device;
 struct firmware;
 
 int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
                       unsigned int run_mode);
-void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name);
 void atomisp_video_unregister(struct atomisp_video_pipe *video);
-void atomisp_acc_unregister(struct atomisp_acc_pipe *video);
 const struct firmware *atomisp_load_firmware(struct atomisp_device *isp);
 int atomisp_csi_lane_config(struct atomisp_device *isp);
 
index f504941..a5fd6d3 100644 (file)
 #include "hmm/hmm_common.h"
 #include "hmm/hmm_bo.h"
 
-static unsigned int order_to_nr(unsigned int order)
-{
-       return 1U << order;
-}
-
-static unsigned int nr_to_order_bottom(unsigned int nr)
-{
-       return fls(nr) - 1;
-}
-
 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
                     unsigned int pgnr)
 {
@@ -625,136 +615,40 @@ found:
        return bo;
 }
 
-static void free_private_bo_pages(struct hmm_buffer_object *bo,
-                                 int free_pgnr)
+static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
 {
-       int i, ret;
+       unsigned long i;
 
-       for (i = 0; i < free_pgnr; i++) {
-               ret = set_pages_wb(bo->pages[i], 1);
-               if (ret)
-                       dev_err(atomisp_dev,
-                               "set page to WB err ...ret = %d\n",
-                               ret);
-               /*
-               W/A: set_pages_wb seldom return value = -EFAULT
-               indicate that address of page is not in valid
-               range(0xffff880000000000~0xffffc7ffffffffff)
-               then, _free_pages would panic; Do not know why page
-               address be valid,it maybe memory corruption by lowmemory
-               */
-               if (!ret) {
-                       __free_pages(bo->pages[i], 0);
-               }
-       }
+       for (i = 0; i < nr_pages; i++)
+               __free_pages(page_array[i], 0);
+}
+
+static void free_private_bo_pages(struct hmm_buffer_object *bo)
+{
+       set_pages_array_wb(bo->pages, bo->pgnr);
+       free_pages_bulk_array(bo->pgnr, bo->pages);
 }
 
 /*Allocate pages which will be used only by ISP*/
 static int alloc_private_pages(struct hmm_buffer_object *bo)
 {
+       const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
        int ret;
-       unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
-       struct page *pages;
-       gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
-       int i, j;
-       int failure_number = 0;
-       bool reduce_order = false;
-       bool lack_mem = true;
-
-       pgnr = bo->pgnr;
-
-       i = 0;
-       alloc_pgnr = 0;
-
-       while (pgnr) {
-               order = nr_to_order_bottom(pgnr);
-               /*
-                * if be short of memory, we will set order to 0
-                * everytime.
-                */
-               if (lack_mem)
-                       order = HMM_MIN_ORDER;
-               else if (order > HMM_MAX_ORDER)
-                       order = HMM_MAX_ORDER;
-retry:
-               /*
-                * When order > HMM_MIN_ORDER, for performance reasons we don't
-                * want alloc_pages() to sleep. In case it fails and fallbacks
-                * to HMM_MIN_ORDER or in case the requested order is originally
-                * the minimum value, we can allow alloc_pages() to sleep for
-                * robustness purpose.
-                *
-                * REVISIT: why __GFP_FS is necessary?
-                */
-               if (order == HMM_MIN_ORDER) {
-                       gfp &= ~GFP_NOWAIT;
-                       gfp |= __GFP_RECLAIM | __GFP_FS;
-               }
-
-               pages = alloc_pages(gfp, order);
-               if (unlikely(!pages)) {
-                       /*
-                        * in low memory case, if allocation page fails,
-                        * we turn to try if order=0 allocation could
-                        * succeed. if order=0 fails too, that means there is
-                        * no memory left.
-                        */
-                       if (order == HMM_MIN_ORDER) {
-                               dev_err(atomisp_dev,
-                                       "%s: cannot allocate pages\n",
-                                       __func__);
-                               goto cleanup;
-                       }
-                       order = HMM_MIN_ORDER;
-                       failure_number++;
-                       reduce_order = true;
-                       /*
-                        * if fail two times continuously, we think be short
-                        * of memory now.
-                        */
-                       if (failure_number == 2) {
-                               lack_mem = true;
-                               failure_number = 0;
-                       }
-                       goto retry;
-               } else {
-                       blk_pgnr = order_to_nr(order);
-
-                       /*
-                        * set memory to uncacheable -- UC_MINUS
-                        */
-                       ret = set_pages_uc(pages, blk_pgnr);
-                       if (ret) {
-                               dev_err(atomisp_dev,
-                                       "set page uncacheablefailed.\n");
-
-                               __free_pages(pages, order);
 
-                               goto cleanup;
-                       }
-
-                       for (j = 0; j < blk_pgnr; j++, i++) {
-                               bo->pages[i] = pages + j;
-                       }
-
-                       pgnr -= blk_pgnr;
+       ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+       if (ret != bo->pgnr) {
+               free_pages_bulk_array(ret, bo->pages);
+               return -ENOMEM;
+       }
 
-                       /*
-                        * if order is not reduced this time, clear
-                        * failure_number.
-                        */
-                       if (reduce_order)
-                               reduce_order = false;
-                       else
-                               failure_number = 0;
-               }
+       ret = set_pages_array_uc(bo->pages, bo->pgnr);
+       if (ret) {
+               dev_err(atomisp_dev, "set pages uncacheable failed.\n");
+               free_pages_bulk_array(bo->pgnr, bo->pages);
+               return ret;
        }
 
        return 0;
-cleanup:
-       alloc_pgnr = i;
-       free_private_bo_pages(bo, alloc_pgnr);
-       return -ENOMEM;
 }
 
 static void free_user_pages(struct hmm_buffer_object *bo,
@@ -762,12 +656,8 @@ static void free_user_pages(struct hmm_buffer_object *bo,
 {
        int i;
 
-       if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
-               unpin_user_pages(bo->pages, page_nr);
-       } else {
-               for (i = 0; i < page_nr; i++)
-                       put_page(bo->pages[i]);
-       }
+       for (i = 0; i < page_nr; i++)
+               put_page(bo->pages[i]);
 }
 
 /*
@@ -777,43 +667,13 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
                            const void __user *userptr)
 {
        int page_nr;
-       struct vm_area_struct *vma;
-
-       mutex_unlock(&bo->mutex);
-       mmap_read_lock(current->mm);
-       vma = find_vma(current->mm, (unsigned long)userptr);
-       mmap_read_unlock(current->mm);
-       if (!vma) {
-               dev_err(atomisp_dev, "find_vma failed\n");
-               mutex_lock(&bo->mutex);
-               return -EFAULT;
-       }
-       mutex_lock(&bo->mutex);
-       /*
-        * Handle frame buffer allocated in other kerenl space driver
-        * and map to user space
-        */
 
        userptr = untagged_addr(userptr);
 
-       if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-               page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
-                                        FOLL_LONGTERM | FOLL_WRITE,
-                                        bo->pages, NULL);
-               bo->mem_type = HMM_BO_MEM_TYPE_PFN;
-       } else {
-               /*Handle frame buffer allocated in user space*/
-               mutex_unlock(&bo->mutex);
-               page_nr = get_user_pages_fast((unsigned long)userptr,
-                                             (int)(bo->pgnr), 1, bo->pages);
-               mutex_lock(&bo->mutex);
-               bo->mem_type = HMM_BO_MEM_TYPE_USER;
-       }
-
-       dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n",
-               __func__,
-               bo->pgnr,
-               bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
+       /* Handle frame buffer allocated in user space */
+       mutex_unlock(&bo->mutex);
+       page_nr = get_user_pages_fast((unsigned long)userptr, bo->pgnr, 1, bo->pages);
+       mutex_lock(&bo->mutex);
 
        /* can be written by caller, not forced */
        if (page_nr != bo->pgnr) {
@@ -854,7 +714,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
        mutex_lock(&bo->mutex);
        check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 
-       bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+       bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
        if (unlikely(!bo->pages)) {
                ret = -ENOMEM;
                goto alloc_err;
@@ -910,7 +770,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
        bo->status &= (~HMM_BO_PAGE_ALLOCED);
 
        if (bo->type == HMM_BO_PRIVATE)
-               free_private_bo_pages(bo, bo->pgnr);
+               free_private_bo_pages(bo);
        else if (bo->type == HMM_BO_USER)
                free_user_pages(bo, bo->pgnr);
        else
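
Editor's note: the rewritten allocator relies on alloc_pages_bulk_array(), which fills only the NULL slots of a page-pointer array and returns the number of populated entries (hence the kmalloc_array() -> kcalloc() change above, since the array must start out zeroed), and on set_pages_array_uc()/set_pages_array_wb() to flip the caching attribute for the whole array at once. A condensed sketch of the allocate/uncache/unwind pattern under those assumptions; alloc_uc_pages() is an illustrative helper, not part of the patch:

    #include <linux/gfp.h>
    #include <asm/set_memory.h>

    /* Illustrative only; 'pages' must come from kcalloc() (all entries NULL). */
    static int alloc_uc_pages(struct page **pages, unsigned long nr)
    {
            const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
            unsigned long got, i;
            int ret;

            got = alloc_pages_bulk_array(gfp, nr, pages);
            if (got != nr) {
                    for (i = 0; i < got; i++)
                            __free_pages(pages[i], 0);
                    return -ENOMEM;
            }

            ret = set_pages_array_uc(pages, nr);    /* mark uncacheable */
            if (ret) {
                    for (i = 0; i < nr; i++)
                            __free_pages(pages[i], 0);
                    return ret;
            }

            return 0;
    }
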
index 0e7c38b..67915d7 100644 (file)
@@ -950,8 +950,8 @@ sh_css_set_black_frame(struct ia_css_stream *stream,
                params->fpn_config.data = NULL;
        }
        if (!params->fpn_config.data) {
-               params->fpn_config.data = kvmalloc(height * width *
-                                                  sizeof(short), GFP_KERNEL);
+               params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)),
+                                                  GFP_KERNEL);
                if (!params->fpn_config.data) {
                        IA_CSS_ERROR("out of memory");
                        IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
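
Editor's note: array3_size() (from <linux/overflow.h>) multiplies its three arguments with overflow checking and saturates to SIZE_MAX on overflow, so an oversized width/height pair makes kvmalloc() fail instead of silently allocating a short buffer. A tiny sketch of the idiom with an illustrative helper name:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static short *alloc_fpn_table(unsigned int width, unsigned int height)
    {
            /* Saturates to SIZE_MAX on overflow, so kvmalloc() returns NULL. */
            return kvmalloc(array3_size(height, width, sizeof(short)),
                            GFP_KERNEL);
    }
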
index 294c808..3e74621 100644 (file)
@@ -863,16 +863,16 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
        mutex_lock(&imxmd->md.graph_mutex);
 
        if (on) {
-               ret = __media_pipeline_start(entity, &imxmd->pipe);
+               ret = __media_pipeline_start(entity->pads, &imxmd->pipe);
                if (ret)
                        goto out;
                ret = v4l2_subdev_call(sd, video, s_stream, 1);
                if (ret)
-                       __media_pipeline_stop(entity);
+                       __media_pipeline_stop(entity->pads);
        } else {
                v4l2_subdev_call(sd, video, s_stream, 0);
-               if (entity->pipe)
-                       __media_pipeline_stop(entity);
+               if (media_pad_pipeline(entity->pads))
+                       __media_pipeline_stop(entity->pads);
        }
 
 out:
index cbc66ef..e5b550c 100644 (file)
@@ -1360,7 +1360,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq,
 
        mutex_lock(&csi->mdev.graph_mutex);
 
-       ret = __media_pipeline_start(&csi->sd.entity, &csi->pipe);
+       ret = __video_device_pipeline_start(csi->vdev, &csi->pipe);
        if (ret)
                goto err_unlock;
 
@@ -1373,7 +1373,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq,
        return 0;
 
 err_stop:
-       __media_pipeline_stop(&csi->sd.entity);
+       __video_device_pipeline_stop(csi->vdev);
 err_unlock:
        mutex_unlock(&csi->mdev.graph_mutex);
        dev_err(csi->dev, "pipeline start failed with %d\n", ret);
@@ -1396,7 +1396,7 @@ static void imx7_csi_video_stop_streaming(struct vb2_queue *vq)
 
        mutex_lock(&csi->mdev.graph_mutex);
        v4l2_subdev_call(&csi->sd, video, s_stream, 0);
-       __media_pipeline_stop(&csi->sd.entity);
+       __video_device_pipeline_stop(csi->vdev);
        mutex_unlock(&csi->mdev.graph_mutex);
 
        /* release all active buffers */
index dbdd015..caa358e 100644 (file)
@@ -626,8 +626,11 @@ struct ipu3_uapi_stats_3a {
  * @b: white balance gain for B channel.
  * @gb:        white balance gain for Gb channel.
  *
- * Precision u3.13, range [0, 8). White balance correction is done by applying
- * a multiplicative gain to each color channels prior to BNR.
+ * For BNR parameters WB gain factor for the three channels [Ggr, Ggb, Gb, Gr].
+ * Their precision is U3.13 and the range is (0, 8) and the actual gain is
+ * Gx + 1, it is typically Gx = 1.
+ *
+ * Pout = {Pin * (1 + Gx)}.
  */
 struct ipu3_uapi_bnr_static_config_wb_gains_config {
        __u16 gr;
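
Editor's note: per the updated comment, each __u16 gain field holds a U3.13 fixed-point value Gx in [0, 8), and the multiplier actually applied is 1 + Gx, so a register value of 8192 (1.0 in U3.13) doubles the channel. A hypothetical userspace helper, assuming that encoding:

    #include <stdint.h>

    /* Convert a U3.13 WB gain field to the effective multiplier (1 + Gx). */
    static inline double bnr_wb_multiplier(uint16_t gx_u3_13)
    {
            return 1.0 + (double)gx_u3_13 / 8192.0; /* 8192 == 1.0 in U3.13 */
    }

    /* Example: bnr_wb_multiplier(8192) == 2.0, i.e. Pout = 2 * Pin. */
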
index d1c539c..ce13e74 100644 (file)
@@ -192,33 +192,30 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
                                     struct v4l2_subdev_state *sd_state,
                                     struct v4l2_subdev_selection *sel)
 {
-       struct v4l2_rect *try_sel, *r;
-       struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
-                                                       struct imgu_v4l2_subdev,
-                                                       subdev);
+       struct imgu_v4l2_subdev *imgu_sd =
+               container_of(sd, struct imgu_v4l2_subdev, subdev);
 
        if (sel->pad != IMGU_NODE_IN)
                return -EINVAL;
 
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP:
-               try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
-               r = &imgu_sd->rect.eff;
-               break;
+               if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+                       sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
+                                                          sel->pad);
+               else
+                       sel->r = imgu_sd->rect.eff;
+               return 0;
        case V4L2_SEL_TGT_COMPOSE:
-               try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
-               r = &imgu_sd->rect.bds;
-               break;
+               if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+                       sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
+                                                             sel->pad);
+               else
+                       sel->r = imgu_sd->rect.bds;
+               return 0;
        default:
                return -EINVAL;
        }
-
-       if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
-               sel->r = *try_sel;
-       else
-               sel->r = *r;
-
-       return 0;
 }
 
 static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
@@ -486,7 +483,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
        pipe = node->pipe;
        imgu_pipe = &imgu->imgu_pipe[pipe];
        atomic_set(&node->sequence, 0);
-       r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline);
+       r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline);
        if (r < 0)
                goto fail_return_bufs;
 
@@ -511,7 +508,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
        return 0;
 
 fail_stop_pipeline:
-       media_pipeline_stop(&node->vdev.entity);
+       video_device_pipeline_stop(&node->vdev);
 fail_return_bufs:
        imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
 
@@ -551,7 +548,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
        imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
        mutex_unlock(&imgu->streaming_lock);
 
-       media_pipeline_stop(&node->vdev.entity);
+       video_device_pipeline_stop(&node->vdev);
 }
 
 /******************** v4l2_ioctl_ops ********************/
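
Editor's note: several drivers in this series switch from media_pipeline_start(&vdev->entity, ...) to the video_device_pipeline_start()/video_device_pipeline_stop() wrappers, which take the struct video_device directly. A minimal sketch of the resulting vb2 start/stop pairing; struct my_channel and the my_* callbacks are hypothetical, and buffer cleanup on failure is omitted:

    #include <media/media-entity.h>
    #include <media/v4l2-dev.h>
    #include <media/v4l2-subdev.h>
    #include <media/videobuf2-core.h>

    struct my_channel {
            struct video_device vdev;
            struct media_pipeline pipe;
            struct v4l2_subdev *src_sd;
    };

    static int my_start_streaming(struct vb2_queue *vq, unsigned int count)
    {
            struct my_channel *chan = vb2_get_drv_priv(vq);
            int ret;

            ret = video_device_pipeline_start(&chan->vdev, &chan->pipe);
            if (ret < 0)
                    return ret;

            ret = v4l2_subdev_call(chan->src_sd, video, s_stream, 1);
            if (ret < 0) {
                    video_device_pipeline_stop(&chan->vdev);
                    return ret;
            }

            return 0;
    }

    static void my_stop_streaming(struct vb2_queue *vq)
    {
            struct my_channel *chan = vb2_get_drv_priv(vq);

            v4l2_subdev_call(chan->src_sd, video, s_stream, 0);
            video_device_pipeline_stop(&chan->vdev);
    }
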
index 8549d95..52f224d 100644 (file)
@@ -1102,6 +1102,7 @@ static int vdec_probe(struct platform_device *pdev)
 
 err_vdev_release:
        video_device_release(vdev);
+       v4l2_device_unregister(&core->v4l2_dev);
        return ret;
 }
 
@@ -1110,6 +1111,7 @@ static int vdec_remove(struct platform_device *pdev)
        struct amvdec_core *core = platform_get_drvdata(pdev);
 
        video_unregister_device(core->vdev_dec);
+       v4l2_device_unregister(&core->v4l2_dev);
 
        return 0;
 }
index 28aacda..fa2a36d 100644 (file)
@@ -548,10 +548,8 @@ static int iss_pipeline_is_last(struct media_entity *me)
        struct iss_pipeline *pipe;
        struct media_pad *pad;
 
-       if (!me->pipe)
-               return 0;
        pipe = to_iss_pipeline(me);
-       if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
+       if (!pipe || pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
                return 0;
        pad = media_pad_remote_pad_first(&pipe->output->pad);
        return pad->entity == me;
index 842509d..60f3d84 100644 (file)
@@ -870,8 +870,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
         * Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         */
-       pipe = entity->pipe
-            ? to_iss_pipeline(entity) : &video->pipe;
+       pipe = to_iss_pipeline(&video->video.entity) ? : &video->pipe;
        pipe->external = NULL;
        pipe->external_rate = 0;
        pipe->external_bpp = 0;
@@ -887,7 +886,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        if (video->iss->pdata->set_constraints)
                video->iss->pdata->set_constraints(video->iss, true);
 
-       ret = media_pipeline_start(entity, &pipe->pipe);
+       ret = video_device_pipeline_start(&video->video, &pipe->pipe);
        if (ret < 0)
                goto err_media_pipeline_start;
 
@@ -978,7 +977,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
 err_omap4iss_set_stream:
        vb2_streamoff(&vfh->queue, type);
 err_iss_video_check_format:
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
 err_media_pipeline_start:
        if (video->iss->pdata->set_constraints)
                video->iss->pdata->set_constraints(video->iss, false);
@@ -1032,7 +1031,7 @@ iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
 
        if (video->iss->pdata->set_constraints)
                video->iss->pdata->set_constraints(video->iss, false);
-       media_pipeline_stop(&video->video.entity);
+       video_device_pipeline_stop(&video->video);
 
 done:
        mutex_unlock(&video->stream_lock);
index 526281b..ca2d5ed 100644 (file)
@@ -90,8 +90,15 @@ struct iss_pipeline {
        int external_bpp;
 };
 
-#define to_iss_pipeline(__e) \
-       container_of((__e)->pipe, struct iss_pipeline, pipe)
+static inline struct iss_pipeline *to_iss_pipeline(struct media_entity *entity)
+{
+       struct media_pipeline *pipe = media_entity_pipeline(entity);
+
+       if (!pipe)
+               return NULL;
+
+       return container_of(pipe, struct iss_pipeline, pipe);
+}
 
 static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
 {
index 21c13f9..621944f 100644 (file)
@@ -2,6 +2,7 @@
 config VIDEO_SUNXI_CEDRUS
        tristate "Allwinner Cedrus VPU driver"
        depends on VIDEO_DEV
+       depends on RESET_CONTROLLER
        depends on HAS_DMA
        depends on OF
        select MEDIA_CONTROLLER
index f10a041..d58370a 100644 (file)
@@ -547,7 +547,7 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
                       VI_INCR_SYNCPT_NO_STALL);
 
        /* start the pipeline */
-       ret = media_pipeline_start(&chan->video.entity, pipe);
+       ret = video_device_pipeline_start(&chan->video, pipe);
        if (ret < 0)
                goto error_pipeline_start;
 
@@ -595,7 +595,7 @@ error_kthread_done:
 error_kthread_start:
        tegra_channel_set_stream(chan, false);
 error_set_stream:
-       media_pipeline_stop(&chan->video.entity);
+       video_device_pipeline_stop(&chan->video);
 error_pipeline_start:
        tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
        return ret;
@@ -617,7 +617,7 @@ static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
 
        tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
        tegra_channel_set_stream(chan, false);
-       media_pipeline_stop(&chan->video.entity);
+       video_device_pipeline_stop(&chan->video);
 }
 
 /*
index b7f16ee..cb4f7cc 100644 (file)
@@ -284,6 +284,25 @@ void target_pr_kref_release(struct kref *kref)
        complete(&deve->pr_comp);
 }
 
+/*
+ * Establish UA condition on SCSI device - all LUNs
+ */
+void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
+{
+       struct se_dev_entry *se_deve;
+       struct se_lun *lun;
+
+       spin_lock(&dev->se_port_lock);
+       list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
+
+               spin_lock(&lun->lun_deve_lock);
+               list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+                       core_scsi3_ua_allocate(se_deve, asc, ascq);
+               spin_unlock(&lun->lun_deve_lock);
+       }
+       spin_unlock(&dev->se_port_lock);
+}
+
 static void
 target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
                             bool skip_new)
index 8351c97..d9266cf 100644 (file)
@@ -230,14 +230,12 @@ static void iblock_unplug_device(struct se_dev_plug *se_plug)
        clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
 }
 
-static unsigned long long iblock_emulate_read_cap_with_block_size(
-       struct se_device *dev,
-       struct block_device *bd,
-       struct request_queue *q)
+static sector_t iblock_get_blocks(struct se_device *dev)
 {
-       u32 block_size = bdev_logical_block_size(bd);
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
        unsigned long long blocks_long =
-               div_u64(bdev_nr_bytes(bd), block_size) - 1;
+               div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;
 
        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;
@@ -829,15 +827,6 @@ fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
-static sector_t iblock_get_blocks(struct se_device *dev)
-{
-       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-       struct block_device *bd = ib_dev->ibd_bd;
-       struct request_queue *q = bdev_get_queue(bd);
-
-       return iblock_emulate_read_cap_with_block_size(dev, bd, q);
-}
-
 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
index 30fcf69..38a6d08 100644 (file)
@@ -89,6 +89,7 @@ int   target_configure_device(struct se_device *dev);
 void   target_free_device(struct se_device *);
 int    target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                               void *data);
+void   target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq);
 
 /* target_core_configfs.c */
 extern struct configfs_item_operations target_core_dev_item_ops;
index a1d6755..1493b1d 100644 (file)
@@ -2956,13 +2956,28 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                        __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
                                (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
                                type, scope, preempt_type);
-
-                       if (preempt_type == PREEMPT_AND_ABORT)
-                               core_scsi3_release_preempt_and_abort(
-                                       &preempt_and_abort_list, pr_reg_n);
                }
+
                spin_unlock(&dev->dev_reservation_lock);
 
+               /*
+                * SPC-4 5.12.11.2.6 Preempting and aborting
+                * The actions described in this subclause shall be performed
+                * for all I_T nexuses that are registered with the non-zero
+                * SERVICE ACTION RESERVATION KEY value, without regard for
+                * whether the preempted I_T nexuses hold the persistent
+                * reservation. If the SERVICE ACTION RESERVATION KEY field is
+                * set to zero and an all registrants persistent reservation is
+                * present, the device server shall abort all commands for all
+                * registered I_T nexuses.
+                */
+               if (preempt_type == PREEMPT_AND_ABORT) {
+                       core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list,
+                                          cmd);
+                       core_scsi3_release_preempt_and_abort(
+                               &preempt_and_abort_list, pr_reg_n);
+               }
+
                if (pr_tmpl->pr_aptpl_active)
                        core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
 
@@ -3022,7 +3037,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                if (calling_it_nexus)
                        continue;
 
-               if (pr_reg->pr_res_key != sa_res_key)
+               if (sa_res_key && pr_reg->pr_res_key != sa_res_key)
                        continue;
 
                pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -3425,8 +3440,6 @@ after_iport_check:
         *       transport protocols where port names are not required;
         * d) Register the reservation key specified in the SERVICE ACTION
         *    RESERVATION KEY field;
-        * e) Retain the reservation key specified in the SERVICE ACTION
-        *    RESERVATION KEY field and associated information;
         *
         * Also, It is not an error for a REGISTER AND MOVE service action to
         * register an I_T nexus that is already registered with the same
@@ -3448,6 +3461,12 @@ after_iport_check:
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
                                                iport_ptr);
                new_reg = 1;
+       } else {
+               /*
+                * e) Retain the reservation key specified in the SERVICE ACTION
+                *    RESERVATION KEY field and associated information;
+                */
+               dest_pr_reg->pr_res_key = sa_res_key;
        }
        /*
         * f) Release the persistent reservation for the persistent reservation
index 7838dc2..5926316 100644 (file)
@@ -3531,8 +3531,7 @@ static void target_tmr_work(struct work_struct *work)
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                if (tmr->response == TMR_FUNCTION_COMPLETE) {
-                       target_ua_allocate_lun(cmd->se_sess->se_node_acl,
-                                              cmd->orig_fe_lun, 0x29,
+                       target_dev_ua_allocate(dev, 0x29,
                                               ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
                }
                break;
index 2a5570b..b80e25e 100644 (file)
@@ -516,11 +516,7 @@ static int start_power_clamp(void)
        cpus_read_lock();
 
        /* prefer BSP */
-       control_cpu = 0;
-       if (!cpu_online(control_cpu)) {
-               control_cpu = get_cpu();
-               put_cpu();
-       }
+       control_cpu = cpumask_first(cpu_online_mask);
 
        clamping = true;
        schedule_delayed_work(&poll_pkg_cstate_work, 0);
index 7256e6c..b1f59a5 100644 (file)
@@ -772,7 +772,7 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
 }
 
 /**
- * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
+ * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
  * @hba: per adapter instance
  * @pos: position of the bit to be cleared
  */
@@ -3098,7 +3098,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
 
        if (ret)
                dev_err(hba->dev,
-                       "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
+                       "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
                        __func__, opcode, idn, ret, retries);
        return ret;
 }
index 3d69a81..b7f412d 100644 (file)
@@ -383,7 +383,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;
 
-       /* If command type is WRITE or DISCARD, set bitmap as drity */
+       /* If command type is WRITE or DISCARD, set bitmap as dirty */
        if (ufshpb_is_write_or_discard(cmd)) {
                ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
                                   transfer_len, true);
@@ -616,7 +616,7 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
 static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
                                                   blk_status_t error)
 {
-       struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
+       struct ufshpb_req *umap_req = req->end_io_data;
 
        ufshpb_put_req(umap_req->hpb, umap_req);
        return RQ_END_IO_NONE;
@@ -625,7 +625,7 @@ static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
 static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
                                                  blk_status_t error)
 {
-       struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
+       struct ufshpb_req *map_req = req->end_io_data;
        struct ufshpb_lu *hpb = map_req->hpb;
        struct ufshpb_subregion *srgn;
        unsigned long flags;
index 745e48e..62387cc 100644 (file)
@@ -118,7 +118,6 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *host)
        host->ice_mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR(host->ice_mmio)) {
                err = PTR_ERR(host->ice_mmio);
-               dev_err(dev, "Failed to map ICE registers; err=%d\n", err);
                return err;
        }
 
index ea51624..c0e7c76 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/acpi.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/reset.h>
@@ -85,7 +86,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
                 * mode. If the controller supports DRD but the dr_mode is not
                 * specified or set to OTG, then set the mode to peripheral.
                 */
-               if (mode == USB_DR_MODE_OTG &&
+               if (mode == USB_DR_MODE_OTG && !dwc->edev &&
                    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
                     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
                    !DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -1690,6 +1691,46 @@ static void dwc3_check_params(struct dwc3 *dwc)
        }
 }
 
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+       struct device *dev = dwc->dev;
+       struct device_node *np_phy;
+       struct extcon_dev *edev = NULL;
+       const char *name;
+
+       if (device_property_read_bool(dev, "extcon"))
+               return extcon_get_edev_by_phandle(dev, 0);
+
+       /*
+        * Device tree platforms should get extcon via phandle.
+        * On ACPI platforms, we get the name from a device property.
+        * This device property is for kernel internal use only and
+        * is expected to be set by the glue code.
+        */
+       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
+               return extcon_get_extcon_dev(name);
+
+       /*
+        * Try to get an extcon device from the USB PHY controller's "port"
+        * node. Check if it has the "port" node first, to avoid printing the
+        * error message from underlying code, as it's a valid case: extcon
+        * device (and "port" node) may be missing in case of "usb-role-switch"
+        * or OTG mode.
+        */
+       np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+       if (of_graph_is_present(np_phy)) {
+               struct device_node *np_conn;
+
+               np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+               if (np_conn)
+                       edev = extcon_find_edev_by_node(np_conn);
+               of_node_put(np_conn);
+       }
+       of_node_put(np_phy);
+
+       return edev;
+}
+
 static int dwc3_probe(struct platform_device *pdev)
 {
        struct device           *dev = &pdev->dev;
@@ -1840,6 +1881,12 @@ static int dwc3_probe(struct platform_device *pdev)
                goto err2;
        }
 
+       dwc->edev = dwc3_get_extcon(dwc);
+       if (IS_ERR(dwc->edev)) {
+               ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
+               goto err3;
+       }
+
        ret = dwc3_get_dr_mode(dwc);
        if (ret)
                goto err3;
index 8cad9e7..039bf24 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/extcon.h>
-#include <linux/of_graph.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
@@ -439,51 +438,6 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
-       struct device *dev = dwc->dev;
-       struct device_node *np_phy;
-       struct extcon_dev *edev = NULL;
-       const char *name;
-
-       if (device_property_read_bool(dev, "extcon"))
-               return extcon_get_edev_by_phandle(dev, 0);
-
-       /*
-        * Device tree platforms should get extcon via phandle.
-        * On ACPI platforms, we get the name from a device property.
-        * This device property is for kernel internal use only and
-        * is expected to be set by the glue code.
-        */
-       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
-               edev = extcon_get_extcon_dev(name);
-               if (!edev)
-                       return ERR_PTR(-EPROBE_DEFER);
-
-               return edev;
-       }
-
-       /*
-        * Try to get an extcon device from the USB PHY controller's "port"
-        * node. Check if it has the "port" node first, to avoid printing the
-        * error message from underlying code, as it's a valid case: extcon
-        * device (and "port" node) may be missing in case of "usb-role-switch"
-        * or OTG mode.
-        */
-       np_phy = of_parse_phandle(dev->of_node, "phys", 0);
-       if (of_graph_is_present(np_phy)) {
-               struct device_node *np_conn;
-
-               np_conn = of_graph_get_remote_node(np_phy, -1, -1);
-               if (np_conn)
-                       edev = extcon_find_edev_by_node(np_conn);
-               of_node_put(np_conn);
-       }
-       of_node_put(np_phy);
-
-       return edev;
-}
-
 #if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
 #define ROLE_SWITCH 1
 static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -588,10 +542,6 @@ int dwc3_drd_init(struct dwc3 *dwc)
            device_property_read_bool(dwc->dev, "usb-role-switch"))
                return dwc3_setup_role_switch(dwc);
 
-       dwc->edev = dwc3_get_extcon(dwc);
-       if (IS_ERR(dwc->edev))
-               return PTR_ERR(dwc->edev);
-
        if (dwc->edev) {
                dwc->edev_nb.notifier_call = dwc3_drd_notifier;
                ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
index 6c14a79..fea5290 100644 (file)
@@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
        /* Manage SoftReset */
        reset_control_deassert(dwc3_data->rstc_rst);
 
-       child = of_get_child_by_name(node, "usb");
+       child = of_get_compatible_child(node, "snps,dwc3");
        if (!child) {
                dev_err(&pdev->dev, "failed to find dwc3 core node\n");
                ret = -ENODEV;
index 079cd33..5fe2d13 100644 (file)
@@ -1292,8 +1292,8 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
                        trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
                }
 
-               /* always enable Interrupt on Missed ISOC */
-               trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+               if (!no_interrupt && !chain)
+                       trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
                break;
 
        case USB_ENDPOINT_XFER_BULK:
@@ -1698,6 +1698,16 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
        cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
        memset(&params, 0, sizeof(params));
        ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+       /*
+        * If the End Transfer command was timed out while the device is
+        * not in SETUP phase, it's possible that an incoming Setup packet
+        * may prevent the command's completion. Let's retry when the
+        * ep0state returns to EP0_SETUP_PHASE.
+        */
+       if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) {
+               dep->flags |= DWC3_EP_DELAY_STOP;
+               return 0;
+       }
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
 
@@ -3238,6 +3248,10 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
        if (event->status & DEPEVT_STATUS_SHORT && !chain)
                return 1;
 
+       if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
+           DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
+               return 1;
+
        if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
            (trb->ctrl & DWC3_TRB_CTRL_LST))
                return 1;
@@ -3719,7 +3733,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
         * timeout. Delay issuing the End Transfer command until the Setup TRB is
         * prepared.
         */
-       if (dwc->ep0state != EP0_SETUP_PHASE) {
+       if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
                dep->flags |= DWC3_EP_DELAY_STOP;
                return;
        }
index ec500ee..0aa3d7e 100644 (file)
@@ -304,6 +304,7 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
 
                queue->sequence = 0;
                queue->buf_used = 0;
+               queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
        } else {
                ret = vb2_streamoff(&queue->queue, queue->queue.type);
                if (ret < 0)
@@ -329,10 +330,11 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
 void uvcg_complete_buffer(struct uvc_video_queue *queue,
                                          struct uvc_buffer *buf)
 {
-       if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
-            buf->length != buf->bytesused) {
-               buf->state = UVC_BUF_STATE_QUEUED;
+       if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) {
+               queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
+               buf->state = UVC_BUF_STATE_ERROR;
                vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
+               vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
 
index bb037fc..dd1c6b2 100644 (file)
@@ -88,6 +88,7 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
                struct uvc_buffer *buf)
 {
        void *mem = req->buf;
+       struct uvc_request *ureq = req->context;
        int len = video->req_size;
        int ret;
 
@@ -113,13 +114,14 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
                list_del(&buf->queue);
-               uvcg_complete_buffer(&video->queue, buf);
                video->fid ^= UVC_STREAM_FID;
+               ureq->last_buf = buf;
 
                video->payload_size = 0;
        }
 
        if (video->payload_size == video->max_payload_size ||
+           video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
            buf->bytesused == video->queue.buf_used)
                video->payload_size = 0;
 }
@@ -155,10 +157,10 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
        sg = sg_next(sg);
 
        for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
-               if (!len || !buf->sg || !sg_dma_len(buf->sg))
+               if (!len || !buf->sg || !buf->sg->length)
                        break;
 
-               sg_left = sg_dma_len(buf->sg) - buf->offset;
+               sg_left = buf->sg->length - buf->offset;
                part = min_t(unsigned int, len, sg_left);
 
                sg_set_page(iter, sg_page(buf->sg), part, buf->offset);
@@ -180,7 +182,8 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
        req->length -= len;
        video->queue.buf_used += req->length - header_len;
 
-       if (buf->bytesused == video->queue.buf_used || !buf->sg) {
+       if (buf->bytesused == video->queue.buf_used || !buf->sg ||
+                       video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
                buf->offset = 0;
@@ -195,6 +198,7 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
                struct uvc_buffer *buf)
 {
        void *mem = req->buf;
+       struct uvc_request *ureq = req->context;
        int len = video->req_size;
        int ret;
 
@@ -209,12 +213,13 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
 
        req->length = video->req_size - len;
 
-       if (buf->bytesused == video->queue.buf_used) {
+       if (buf->bytesused == video->queue.buf_used ||
+                       video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
                list_del(&buf->queue);
-               uvcg_complete_buffer(&video->queue, buf);
                video->fid ^= UVC_STREAM_FID;
+               ureq->last_buf = buf;
        }
 }
 
@@ -255,6 +260,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
        case 0:
                break;
 
+       case -EXDEV:
+               uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
+               queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
+               break;
+
        case -ESHUTDOWN:        /* disconnect from host. */
                uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
                uvcg_queue_cancel(queue, 1);
@@ -431,7 +441,8 @@ static void uvcg_video_pump(struct work_struct *work)
 
                /* Endpoint now owns the request */
                req = NULL;
-               video->req_int_count++;
+               if (buf->state != UVC_BUF_STATE_DONE)
+                       video->req_int_count++;
        }
 
        if (!req)
index b0dfca4..4f3bc27 100644 (file)
@@ -591,6 +591,7 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
                d->gadget.max_speed = USB_SPEED_HIGH;
        d->gadget.speed = USB_SPEED_UNKNOWN;
        d->gadget.dev.of_node = vhub->pdev->dev.of_node;
+       d->gadget.dev.of_node_reused = true;
 
        rc = usb_add_gadget_udc(d->port_dev, &d->gadget);
        if (rc != 0)
index 5ac0ef8..53ffaf4 100644 (file)
@@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
        bdc->delayed_status = false;
        bdc->reinit = reinit;
        bdc->test_mode = false;
+       usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
 }
 
 /* TNotify wkaeup timer */
index 9e56aa2..81ca2bc 100644 (file)
@@ -889,15 +889,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
                if (dev->eps[i].stream_info)
                        xhci_free_stream_info(xhci,
                                        dev->eps[i].stream_info);
-               /* Endpoints on the TT/root port lists should have been removed
-                * when usb_disable_device() was called for the device.
-                * We can't drop them anyway, because the udev might have gone
-                * away by this point, and we can't tell what speed it was.
+               /*
+                * Endpoints are normally deleted from the bandwidth list when
+                * endpoints are dropped, before device is freed.
+                * If host is dying or being removed then endpoints aren't
+                * dropped cleanly, so delete the endpoint from list here.
+                * Only applicable for hosts with software bandwidth checking.
                 */
-               if (!list_empty(&dev->eps[i].bw_endpoint_list))
-                       xhci_warn(xhci, "Slot %u endpoint %u "
-                                       "not removed from BW list!\n",
-                                       slot_id, i);
+
+               if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
+                       list_del_init(&dev->eps[i].bw_endpoint_list);
+                       xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
+                                slot_id, i);
+               }
        }
        /* If this is a hub, free the TT(s) from the TT list */
        xhci_free_tt_info(xhci, dev, slot_id);
index 40228a3..7bccbe5 100644 (file)
 #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI          0x464e
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI        0x51ed
-#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI           0xa71e
-#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI           0x7ec0
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI                0x51ed
 
 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1                        0x43bc
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1           0x161a
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2           0x161b
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3           0x161d
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4           0x161e
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5           0x15d6
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6           0x15d7
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7           0x161c
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8           0x161f
 
 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI                        0x1042
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI               0x1142
@@ -258,6 +246,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_MISSING_CAS;
 
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)
+               xhci->quirks |= XHCI_RESET_TO_DEFAULT;
+
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI ||
@@ -268,12 +260,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
             pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
+            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -306,8 +293,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        }
 
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
-               pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
+               pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+               /*
+                * try to tame the ASMedia 1042 controller which reports 0.96
+                * but appears to behave more like 1.0
+                */
+               xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
                xhci->quirks |= XHCI_BROKEN_STREAMS;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
                pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -336,15 +329,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
             pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
                xhci->quirks |= XHCI_NO_SOFT_RETRY;
 
-       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
-           (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
+       /* xHC spec requires PCI devices to support D3hot and D3cold */
+       if (xhci->hci_version >= 0x120)
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
index 5176765..79d7931 100644 (file)
@@ -810,9 +810,15 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
-       /* Workaround for spurious wakeups at shutdown with HSW */
-       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+
+       /*
+        * Workaround for spurious wakeups at shutdown with HSW, and for boot
+        * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
+        */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
+           xhci->quirks & XHCI_RESET_TO_DEFAULT)
                xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
+
        spin_unlock_irq(&xhci->lock);
 
        xhci_cleanup_msix(xhci);
index c0964fe..cc084d9 100644 (file)
@@ -1897,6 +1897,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_D3COLD     BIT_ULL(41)
 #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
 #define XHCI_SUSPEND_RESUME_CLKS       BIT_ULL(43)
+#define XHCI_RESET_TO_DEFAULT  BIT_ULL(44)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index 3df64d2..a86032a 100644 (file)
@@ -91,7 +91,7 @@ struct SiS_Ext {
        unsigned char VB_ExtTVYFilterIndex;
        unsigned char VB_ExtTVYFilterIndexROM661;
        unsigned char REFindex;
-       char ROMMODEIDX661;
+       signed char ROMMODEIDX661;
 };
 
 struct SiS_Ext2 {
index 74fb5a4..a7987fc 100644 (file)
@@ -183,16 +183,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(ucsi_send_command);
 
-int ucsi_resume(struct ucsi *ucsi)
-{
-       u64 command;
-
-       /* Restore UCSI notification enable mask after system resume */
-       command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
-
-       return ucsi_send_command(ucsi, command, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ucsi_resume);
 /* -------------------------------------------------------------------------- */
 
 struct ucsi_work {
@@ -744,6 +734,7 @@ static void ucsi_partner_change(struct ucsi_connector *con)
 
 static int ucsi_check_connection(struct ucsi_connector *con)
 {
+       u8 prev_flags = con->status.flags;
        u64 command;
        int ret;
 
@@ -754,10 +745,13 @@ static int ucsi_check_connection(struct ucsi_connector *con)
                return ret;
        }
 
+       if (con->status.flags == prev_flags)
+               return 0;
+
        if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
-               if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
-                   UCSI_CONSTAT_PWR_OPMODE_PD)
-                       ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
+               ucsi_register_partner(con);
+               ucsi_pwr_opmode_change(con);
+               ucsi_partner_change(con);
        } else {
                ucsi_partner_change(con);
                ucsi_port_psy_changed(con);
@@ -1276,6 +1270,28 @@ err:
        return ret;
 }
 
+int ucsi_resume(struct ucsi *ucsi)
+{
+       struct ucsi_connector *con;
+       u64 command;
+       int ret;
+
+       /* Restore UCSI notification enable mask after system resume */
+       command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+       ret = ucsi_send_command(ucsi, command, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       for (con = ucsi->connector; con->port; con++) {
+               mutex_lock(&con->lock);
+               ucsi_check_connection(con);
+               mutex_unlock(&con->lock);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ucsi_resume);
+
 static void ucsi_init_work(struct work_struct *work)
 {
        struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
index 8873c16..ce0c8ef 100644 (file)
@@ -185,6 +185,15 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int ucsi_acpi_resume(struct device *dev)
+{
+       struct ucsi_acpi *ua = dev_get_drvdata(dev);
+
+       return ucsi_resume(ua->ucsi);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_acpi_pm_ops, NULL, ucsi_acpi_resume);
+
 static const struct acpi_device_id ucsi_acpi_match[] = {
        { "PNP0CA0", 0 },
        { },
@@ -194,6 +203,7 @@ MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match);
 static struct platform_driver ucsi_acpi_platform_driver = {
        .driver = {
                .name = "ucsi_acpi",
+               .pm = pm_ptr(&ucsi_acpi_pm_ops),
                .acpi_match_table = ACPI_PTR(ucsi_acpi_match),
        },
        .probe = ucsi_acpi_probe,
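
The DEFINE_SIMPLE_DEV_PM_OPS()/pm_ptr() wiring above is the generic way to hook a resume-only handler; pm_ptr() lets the ops table be discarded when CONFIG_PM is disabled. A hedged sketch for a hypothetical platform driver, with every name below made up for illustration:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;
}

static int example_resume(struct device *dev)
{
        /* Re-sync whatever state the device lost across system sleep. */
        return 0;
}

/* System-sleep callbacks only: no suspend handler, resume handler set. */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example",
                .pm = pm_ptr(&example_pm_ops),
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
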
index 9e6bcc0..41e77de 100644 (file)
@@ -340,12 +340,9 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
                size = pci_resource_len(pdev, bar);
                ret = aperture_remove_conflicting_devices(base, size, primary, name);
                if (ret)
-                       break;
+                       return ret;
        }
 
-       if (ret)
-               return ret;
-
        /*
         * WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over.
index 585af90..31ff1da 100644 (file)
@@ -1796,6 +1796,7 @@ failed_ioremap:
 failed_regions:
        cyberpro_free_fb_info(cfb);
 failed_release:
+       pci_disable_device(dev);
        return err;
 }
 
@@ -1812,6 +1813,7 @@ static void cyberpro_pci_remove(struct pci_dev *dev)
                        int_cfb_info = NULL;
 
                pci_release_regions(dev);
+               pci_disable_device(dev);
        }
 }
 
index ae76a21..11922b0 100644 (file)
@@ -1076,7 +1076,8 @@ static int fb_remove(struct platform_device *dev)
        if (par->lcd_supply) {
                ret = regulator_disable(par->lcd_supply);
                if (ret)
-                       return ret;
+                       dev_warn(&dev->dev, "Failed to disable regulator (%pe)\n",
+                                ERR_PTR(ret));
        }
 
        lcd_disable_raster(DA8XX_FRAME_WAIT);
index 1582c71..000b4aa 100644 (file)
@@ -1060,14 +1060,14 @@ static const struct fb_ops gbefb_ops = {
 
 static ssize_t gbefb_show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%u\n", gbe_mem_size);
+       return sysfs_emit(buf, "%u\n", gbe_mem_size);
 }
 
 static DEVICE_ATTR(size, S_IRUGO, gbefb_show_memsize, NULL);
 
 static ssize_t gbefb_show_rev(struct device *device, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", gbe_revision);
+       return sysfs_emit(buf, "%d\n", gbe_revision);
 }
 
 static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL);
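
The snprintf()-to-sysfs_emit() conversions above follow the standard sysfs show() pattern: sysfs_emit() already assumes the single page-sized buffer sysfs provides, so PAGE_SIZE no longer has to be passed by hand. A rough sketch with a made-up attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        /* buf is a full page handed over by sysfs; sysfs_emit() knows that. */
        return sysfs_emit(buf, "%u\n", 42u);
}
static DEVICE_ATTR_RO(example);
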
index 1914ab5..5850e43 100644 (file)
@@ -202,7 +202,7 @@ SiS310SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x, int
         * and destination blitting areas overlap and
         * adapt the bitmap addresses synchronously
         * if the coordinates exceed the valid range.
-        * The the areas do not overlap, we do our
+        * If the areas do not overlap, we do our
         * normal check.
         */
        if((mymax - mymin) < height) {
index ea94d21..d7a14e6 100644 (file)
@@ -148,7 +148,7 @@ struct SiS_Ext {
        unsigned char  VB_ExtTVYFilterIndex;
        unsigned char  VB_ExtTVYFilterIndexROM661;
        unsigned char  REFindex;
-       char           ROMMODEIDX661;
+       signed char    ROMMODEIDX661;
 };
 
 struct SiS_Ext2 {
index fce6cfb..f743bfb 100644 (file)
@@ -1166,7 +1166,7 @@ static ssize_t sm501fb_crtsrc_show(struct device *dev,
        ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL);
        ctrl &= SM501_DC_CRT_CONTROL_SEL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", ctrl ? "crt" : "panel");
+       return sysfs_emit(buf, "%s\n", ctrl ? "crt" : "panel");
 }
 
 /* sm501fb_crtsrc_show
index e65bdc4..9343b7a 100644 (file)
@@ -97,7 +97,6 @@ struct ufx_data {
        struct kref kref;
        int fb_count;
        bool virtualized; /* true when physical usb device not present */
-       struct delayed_work free_framebuffer_work;
        atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
        atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
        u8 *edid; /* null until we read edid from hw or get from sysfs */
@@ -1117,15 +1116,24 @@ static void ufx_free(struct kref *kref)
 {
        struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
 
-       /* this function will wait for all in-flight urbs to complete */
-       if (dev->urbs.count > 0)
-               ufx_free_urb_list(dev);
+       kfree(dev);
+}
 
-       pr_debug("freeing ufx_data %p", dev);
+static void ufx_ops_destory(struct fb_info *info)
+{
+       struct ufx_data *dev = info->par;
+       int node = info->node;
 
-       kfree(dev);
+       /* Assume info structure is freed after this point */
+       framebuffer_release(info);
+
+       pr_debug("fb_info for /dev/fb%d has been freed", node);
+
+       /* release reference taken by kref_init in probe() */
+       kref_put(&dev->kref, ufx_free);
 }
 
+
 static void ufx_release_urb_work(struct work_struct *work)
 {
        struct urb_node *unode = container_of(work, struct urb_node,
@@ -1134,14 +1142,9 @@ static void ufx_release_urb_work(struct work_struct *work)
        up(&unode->dev->urbs.limit_sem);
 }
 
-static void ufx_free_framebuffer_work(struct work_struct *work)
+static void ufx_free_framebuffer(struct ufx_data *dev)
 {
-       struct ufx_data *dev = container_of(work, struct ufx_data,
-                                           free_framebuffer_work.work);
        struct fb_info *info = dev->info;
-       int node = info->node;
-
-       unregister_framebuffer(info);
 
        if (info->cmap.len != 0)
                fb_dealloc_cmap(&info->cmap);
@@ -1153,11 +1156,6 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
 
        dev->info = NULL;
 
-       /* Assume info structure is freed after this point */
-       framebuffer_release(info);
-
-       pr_debug("fb_info for /dev/fb%d has been freed", node);
-
        /* ref taken in probe() as part of registering framebfufer */
        kref_put(&dev->kref, ufx_free);
 }
@@ -1169,11 +1167,13 @@ static int ufx_ops_release(struct fb_info *info, int user)
 {
        struct ufx_data *dev = info->par;
 
+       mutex_lock(&disconnect_mutex);
+
        dev->fb_count--;
 
        /* We can't free fb_info here - fbmem will touch it when we return */
        if (dev->virtualized && (dev->fb_count == 0))
-               schedule_delayed_work(&dev->free_framebuffer_work, HZ);
+               ufx_free_framebuffer(dev);
 
        if ((dev->fb_count == 0) && (info->fbdefio)) {
                fb_deferred_io_cleanup(info);
@@ -1186,6 +1186,8 @@ static int ufx_ops_release(struct fb_info *info, int user)
 
        kref_put(&dev->kref, ufx_free);
 
+       mutex_unlock(&disconnect_mutex);
+
        return 0;
 }
 
@@ -1292,6 +1294,7 @@ static const struct fb_ops ufx_ops = {
        .fb_blank = ufx_ops_blank,
        .fb_check_var = ufx_ops_check_var,
        .fb_set_par = ufx_ops_set_par,
+       .fb_destroy = ufx_ops_destory,
 };
 
 /* Assumes &info->lock held by caller
@@ -1673,9 +1676,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
                goto destroy_modedb;
        }
 
-       INIT_DELAYED_WORK(&dev->free_framebuffer_work,
-                         ufx_free_framebuffer_work);
-
        retval = ufx_reg_read(dev, 0x3000, &id_rev);
        check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
        dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
@@ -1748,10 +1748,12 @@ e_nomem:
 static void ufx_usb_disconnect(struct usb_interface *interface)
 {
        struct ufx_data *dev;
+       struct fb_info *info;
 
        mutex_lock(&disconnect_mutex);
 
        dev = usb_get_intfdata(interface);
+       info = dev->info;
 
        pr_debug("USB disconnect starting\n");
 
@@ -1765,12 +1767,15 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
 
        /* if clients still have us open, will be freed on last close */
        if (dev->fb_count == 0)
-               schedule_delayed_work(&dev->free_framebuffer_work, 0);
+               ufx_free_framebuffer(dev);
 
-       /* release reference taken by kref_init in probe() */
-       kref_put(&dev->kref, ufx_free);
+       /* this function will wait for all in-flight urbs to complete */
+       if (dev->urbs.count > 0)
+               ufx_free_urb_list(dev);
 
-       /* consider ufx_data freed */
+       pr_debug("freeing ufx_data %p", dev);
+
+       unregister_framebuffer(info);
 
        mutex_unlock(&disconnect_mutex);
 }
index 7753e58..3feb6e4 100644 (file)
@@ -1055,7 +1055,8 @@ stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
        struct stifb_info *fb = container_of(info, struct stifb_info, info);
 
-       if (rect->rop != ROP_COPY)
+       if (rect->rop != ROP_COPY ||
+           (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32))
                return cfb_fillrect(info, rect);
 
        SETUP_HW(fb);
index 438e2c7..1ac8390 100644 (file)
@@ -376,7 +376,7 @@ err_cmap:
        return rc;
 }
 
-static int xilinxfb_release(struct device *dev)
+static void xilinxfb_release(struct device *dev)
 {
        struct xilinxfb_drvdata *drvdata = dev_get_drvdata(dev);
 
@@ -402,8 +402,6 @@ static int xilinxfb_release(struct device *dev)
        if (!(drvdata->flags & BUS_ACCESS_FLAG))
                dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
 #endif
-
-       return 0;
 }
 
 /* ---------------------------------------------------------------------
@@ -480,7 +478,9 @@ static int xilinxfb_of_probe(struct platform_device *pdev)
 
 static int xilinxfb_of_remove(struct platform_device *op)
 {
-       return xilinxfb_release(&op->dev);
+       xilinxfb_release(&op->dev);
+
+       return 0;
 }
 
 /* Match table for of_platform binding */
index 35058d8..7c61ff3 100644 (file)
@@ -355,8 +355,10 @@ static int __init exar_wdt_register(struct wdt_priv *priv, const int idx)
                                                    &priv->wdt_res, 1,
                                                    priv, sizeof(*priv));
        if (IS_ERR(n->pdev)) {
+               int err = PTR_ERR(n->pdev);
+
                kfree(n);
-               return PTR_ERR(n->pdev);
+               return err;
        }
 
        list_add_tail(&n->list, &pdev_list);
index 78ba366..2756ed5 100644 (file)
@@ -88,7 +88,7 @@ static bool wdt_is_running(struct watchdog_device *wdd)
        return (wdtcontrol & ENABLE_MASK) == ENABLE_MASK;
 }
 
-/* This routine finds load value that will reset system in required timout */
+/* This routine finds load value that will reset system in required timeout */
 static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
 {
        struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
index 3fe8a7e..c777a61 100644 (file)
@@ -38,6 +38,9 @@
 
 #include "watchdog_core.h"     /* For watchdog_dev_register/... */
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/watchdog.h>
+
 static DEFINE_IDA(watchdog_ida);
 
 static int stop_on_reboot = -1;
@@ -163,6 +166,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
                        int ret;
 
                        ret = wdd->ops->stop(wdd);
+                       trace_watchdog_stop(wdd, ret);
                        if (ret)
                                return NOTIFY_BAD;
                }
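
The trace_watchdog_*() calls added above resolve to TRACE_EVENT definitions pulled in through the CREATE_TRACE_POINTS include. As a rough sketch of what such a header can look like (the real include/trace/events/watchdog.h may be organised differently, for instance around a shared event class):

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM watchdog

#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WATCHDOG_H

#include <linux/tracepoint.h>
#include <linux/watchdog.h>

TRACE_EVENT(watchdog_ping,
        TP_PROTO(struct watchdog_device *wdd, int err),
        TP_ARGS(wdd, err),
        TP_STRUCT__entry(
                __field(int, id)
                __field(int, err)
        ),
        TP_fast_assign(
                __entry->id = wdd->id;
                __entry->err = err;
        ),
        TP_printk("watchdog%d err=%d", __entry->id, __entry->err)
);

#endif /* _TRACE_WATCHDOG_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>
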
index 744b2ab..55574ed 100644 (file)
@@ -47,6 +47,8 @@
 #include "watchdog_core.h"
 #include "watchdog_pretimeout.h"
 
+#include <trace/events/watchdog.h>
+
 /* the dev_t structure to store the dynamically allocated watchdog devices */
 static dev_t watchdog_devt;
 /* Reference to watchdog device behind /dev/watchdog */
@@ -157,10 +159,13 @@ static int __watchdog_ping(struct watchdog_device *wdd)
 
        wd_data->last_hw_keepalive = now;
 
-       if (wdd->ops->ping)
+       if (wdd->ops->ping) {
                err = wdd->ops->ping(wdd);  /* ping the watchdog */
-       else
+               trace_watchdog_ping(wdd, err);
+       } else {
                err = wdd->ops->start(wdd); /* restart watchdog */
+               trace_watchdog_start(wdd, err);
+       }
 
        if (err == 0)
                watchdog_hrtimer_pretimeout_start(wdd);
@@ -259,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd)
                }
        } else {
                err = wdd->ops->start(wdd);
+               trace_watchdog_start(wdd, err);
                if (err == 0) {
                        set_bit(WDOG_ACTIVE, &wdd->status);
                        wd_data->last_keepalive = started_at;
@@ -297,6 +303,7 @@ static int watchdog_stop(struct watchdog_device *wdd)
        if (wdd->ops->stop) {
                clear_bit(WDOG_HW_RUNNING, &wdd->status);
                err = wdd->ops->stop(wdd);
+               trace_watchdog_stop(wdd, err);
        } else {
                set_bit(WDOG_HW_RUNNING, &wdd->status);
        }
@@ -369,6 +376,7 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
 
        if (wdd->ops->set_timeout) {
                err = wdd->ops->set_timeout(wdd, timeout);
+               trace_watchdog_set_timeout(wdd, timeout, err);
        } else {
                wdd->timeout = timeout;
                /* Disable pretimeout if it doesn't fit the new timeout */
index 860f37c..daa525d 100644 (file)
@@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
 
 static inline dma_addr_t grant_to_dma(grant_ref_t grant)
 {
-       return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+       return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
 }
 
 static inline grant_ref_t dma_to_grant(dma_addr_t dma)
 {
-       return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+       return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
 }
 
 static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
@@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
                                 unsigned long attrs)
 {
        struct xen_grant_dma_data *data;
-       unsigned int i, n_pages = PFN_UP(size);
+       unsigned int i, n_pages = XEN_PFN_UP(size);
        unsigned long pfn;
        grant_ref_t grant;
        void *ret;
@@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
        if (unlikely(data->broken))
                return NULL;
 
-       ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+       ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
        if (!ret)
                return NULL;
 
        pfn = virt_to_pfn(ret);
 
        if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
-               free_pages_exact(ret, n_pages * PAGE_SIZE);
+               free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
                return NULL;
        }
 
@@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle, unsigned long attrs)
 {
        struct xen_grant_dma_data *data;
-       unsigned int i, n_pages = PFN_UP(size);
+       unsigned int i, n_pages = XEN_PFN_UP(size);
        grant_ref_t grant;
 
        data = find_xen_grant_dma_data(dev);
@@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 
        gnttab_free_grant_reference_seq(grant, n_pages);
 
-       free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+       free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
 }
 
 static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
@@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
                                         unsigned long attrs)
 {
        struct xen_grant_dma_data *data;
-       unsigned int i, n_pages = PFN_UP(offset + size);
+       unsigned long dma_offset = xen_offset_in_page(offset),
+                       pfn_offset = XEN_PFN_DOWN(offset);
+       unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
        grant_ref_t grant;
        dma_addr_t dma_handle;
 
@@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 
        for (i = 0; i < n_pages; i++) {
                gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
-                               xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+                               pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+                               dir == DMA_TO_DEVICE);
        }
 
-       dma_handle = grant_to_dma(grant) + offset;
+       dma_handle = grant_to_dma(grant) + dma_offset;
 
        return dma_handle;
 }
@@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     unsigned long attrs)
 {
        struct xen_grant_dma_data *data;
-       unsigned long offset = dma_handle & (PAGE_SIZE - 1);
-       unsigned int i, n_pages = PFN_UP(offset + size);
+       unsigned long dma_offset = xen_offset_in_page(dma_handle);
+       unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
        grant_ref_t grant;
 
        if (WARN_ON(dir == DMA_NONE))
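
The switch from PAGE_*/PFN_* to the XEN_PAGE_*/XEN_PFN_* helpers above matters because a grant always covers a 4 KiB Xen frame even when the kernel's own page size is larger. A standalone sketch of the address arithmetic; every constant below, including the marker bit, is an assumption picked for illustration rather than the kernel's definition:

#include <stdint.h>
#include <stdio.h>

#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)  /* assumed marker bit          */
#define XEN_PAGE_SHIFT 12                    /* Xen grants 4 KiB frames     */
#define PAGE_SHIFT     16                    /* e.g. a 64 KiB-page kernel   */

static uint64_t grant_to_dma(uint32_t grant, unsigned int shift)
{
        return XEN_GRANT_DMA_ADDR_OFF | ((uint64_t)grant << shift);
}

static uint32_t dma_to_grant(uint64_t dma, unsigned int shift)
{
        return (uint32_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> shift);
}

int main(void)
{
        uint32_t grant = 5;

        /* Round-trips only when both directions use the Xen page shift. */
        printf("xen shift:    %u\n",
               (unsigned int)dma_to_grant(grant_to_dma(grant, XEN_PAGE_SHIFT),
                                          XEN_PAGE_SHIFT));
        /* Encoding with the larger kernel PAGE_SHIFT decodes to the wrong grant. */
        printf("mixed shifts: %u\n",
               (unsigned int)dma_to_grant(grant_to_dma(grant, PAGE_SHIFT),
                                          XEN_PAGE_SHIFT));
        return 0;
}
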
index 63c7ebb..6a11025 100644 (file)
@@ -911,7 +911,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
                if (!interp_elf_ex) {
                        retval = -ENOMEM;
-                       goto out_free_ph;
+                       goto out_free_file;
                }
 
                /* Get the exec headers */
@@ -1354,6 +1354,7 @@ out:
 out_free_dentry:
        kfree(interp_elf_ex);
        kfree(interp_elf_phdata);
+out_free_file:
        allow_write_access(interpreter);
        if (interpreter)
                fput(interpreter);
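
The new out_free_file label above restores the usual goto unwind, where each failure jumps to the label that releases exactly what has been acquired so far. A small sketch of the pattern with hypothetical resources:

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire_a(void) { return malloc(sizeof(struct res)); }
static struct res *acquire_b(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int do_setup(void)
{
        struct res *a, *b;
        int ret;

        a = acquire_a();
        if (!a)
                return -1;              /* nothing acquired yet, plain return */

        b = acquire_b();
        if (!b) {
                ret = -1;
                goto out_release_a;     /* only a was taken, so only a is undone */
        }

        /* ... use a and b ... */
        release(b);
        ret = 0;
out_release_a:
        release(a);
        return ret;
}
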
index dce3a16..4ec18ce 100644 (file)
@@ -138,6 +138,7 @@ struct share_check {
        u64 root_objectid;
        u64 inum;
        int share_count;
+       bool have_delayed_delete_refs;
 };
 
 static inline int extent_is_shared(struct share_check *sc)
@@ -820,16 +821,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                            struct preftrees *preftrees, struct share_check *sc)
 {
        struct btrfs_delayed_ref_node *node;
-       struct btrfs_delayed_extent_op *extent_op = head->extent_op;
        struct btrfs_key key;
-       struct btrfs_key tmp_op_key;
        struct rb_node *n;
        int count;
        int ret = 0;
 
-       if (extent_op && extent_op->update_key)
-               btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
        spin_lock(&head->lock);
        for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
                node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -855,10 +851,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                case BTRFS_TREE_BLOCK_REF_KEY: {
                        /* NORMAL INDIRECT METADATA backref */
                        struct btrfs_delayed_tree_ref *ref;
+                       struct btrfs_key *key_ptr = NULL;
+
+                       if (head->extent_op && head->extent_op->update_key) {
+                               btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+                               key_ptr = &key;
+                       }
 
                        ref = btrfs_delayed_node_to_tree_ref(node);
                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
-                                              &tmp_op_key, ref->level + 1,
+                                              key_ptr, ref->level + 1,
                                               node->bytenr, count, sc,
                                               GFP_ATOMIC);
                        break;
@@ -884,13 +886,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                        key.offset = ref->offset;
 
                        /*
-                        * Found a inum that doesn't match our known inum, we
-                        * know it's shared.
+                        * If we have a share check context and a reference for
+                        * another inode, we can't exit immediately. This is
+                        * because even if this is a BTRFS_ADD_DELAYED_REF
+                        * reference we may find next a BTRFS_DROP_DELAYED_REF
+                        * which cancels out this ADD reference.
+                        *
+                        * If this is a DROP reference and there was no previous
+                        * ADD reference, then we need to signal that when we
+                        * process references from the extent tree (through
+                        * add_inline_refs() and add_keyed_refs()), we should
+                        * not exit early if we find a reference for another
+                        * inode, because one of the delayed DROP references
+                        * may cancel that reference in the extent tree.
                         */
-                       if (sc && sc->inum && ref->objectid != sc->inum) {
-                               ret = BACKREF_FOUND_SHARED;
-                               goto out;
-                       }
+                       if (sc && count < 0)
+                               sc->have_delayed_delete_refs = true;
 
                        ret = add_indirect_ref(fs_info, preftrees, ref->root,
                                               &key, 0, node->bytenr, count, sc,
@@ -920,7 +931,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
        }
        if (!ret)
                ret = extent_is_shared(sc);
-out:
+
        spin_unlock(&head->lock);
        return ret;
 }
@@ -1023,7 +1034,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-                       if (sc && sc->inum && key.objectid != sc->inum) {
+                       if (sc && sc->inum && key.objectid != sc->inum &&
+                           !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }
@@ -1033,6 +1045,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
                        ret = add_indirect_ref(fs_info, preftrees, root,
                                               &key, 0, bytenr, count,
                                               sc, GFP_NOFS);
+
                        break;
                }
                default:
@@ -1122,7 +1135,8 @@ static int add_keyed_refs(struct btrfs_root *extent_root,
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-                       if (sc && sc->inum && key.objectid != sc->inum) {
+                       if (sc && sc->inum && key.objectid != sc->inum &&
+                           !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }
@@ -1522,6 +1536,9 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
 {
        struct btrfs_backref_shared_cache_entry *entry;
 
+       if (!cache->use_cache)
+               return false;
+
        if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
                return false;
 
@@ -1557,6 +1574,19 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
                return false;
 
        *is_shared = entry->is_shared;
+       /*
+        * If the node at this level is shared, then all nodes below it are also
+        * shared. Some of the nodes below may currently be marked as not shared
+        * because we have just switched from one leaf to another, which also
+        * switched the nodes above the leaf and below the current level, so mark
+        * them as shared.
+        */
+       if (*is_shared) {
+               for (int i = 0; i < level; i++) {
+                       cache->entries[i].is_shared = true;
+                       cache->entries[i].gen = entry->gen;
+               }
+       }
 
        return true;
 }
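
The propagation added above relies on a simple invariant: if a node at level N on the current path is shared, every node below it on that path is shared too, so stale "not shared" entries left over from switching leaves can be overwritten. A small standalone model of that downward propagation (entry layout and the fixed level count are illustrative only):

    #include <stdbool.h>

    #define MODEL_MAX_LEVEL 8

    struct cache_entry_model {
            unsigned long long gen;
            bool is_shared;
    };

    /* Levels 0 .. level-1 sit below 'level' on the same path: inherit sharing. */
    static void propagate_shared_down(struct cache_entry_model *entries,
                                      int level, unsigned long long gen)
    {
            for (int i = 0; i < level; i++) {
                    entries[i].is_shared = true;
                    entries[i].gen = gen;
            }
    }

    int main(void)
    {
            struct cache_entry_model entries[MODEL_MAX_LEVEL] = {{ 0, false }};

            propagate_shared_down(entries, 3, 42);  /* marks levels 0, 1 and 2 */
            return entries[2].is_shared ? 0 : 1;
    }
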
@@ -1573,6 +1603,9 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
        struct btrfs_backref_shared_cache_entry *entry;
        u64 gen;
 
+       if (!cache->use_cache)
+               return;
+
        if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
                return;
 
@@ -1648,6 +1681,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
                .root_objectid = root->root_key.objectid,
                .inum = inum,
                .share_count = 0,
+               .have_delayed_delete_refs = false,
        };
        int level;
 
@@ -1669,6 +1703,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
        /* -1 means we are in the bytenr of the data extent. */
        level = -1;
        ULIST_ITER_INIT(&uiter);
+       cache->use_cache = true;
        while (1) {
                bool is_shared;
                bool cached;
@@ -1698,6 +1733,24 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
                    extent_gen > btrfs_root_last_snapshot(&root->root_item))
                        break;
 
+               /*
+                * If our data extent was not directly shared (without multiple
+                * reference items), then it might have a single reference item
+                * with a count > 1 for the same offset, which means there are 2
+                * (or more) file extent items that point to the data extent -
+                * this happens when a file extent item needs to be split and
+                * then one item gets moved to another leaf due to a b+tree leaf
+                * split when inserting some item. In this case the file extent
+                * items may be located in different leaves and therefore some
+                * of the leaves may be referenced through shared subtrees while
+                * others are not. Since our extent buffer cache only works for
+                * a single path (by far the most common case and simpler to
+                * deal with), we can not use it if we have multiple leaves
+                * (which implies multiple paths).
+                */
+               if (level == -1 && tmp->nnodes > 1)
+                       cache->use_cache = false;
+
                if (level >= 0)
                        store_backref_shared_cache(cache, root, bytenr,
                                                   level, false);
@@ -1713,6 +1766,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
                        break;
                }
                shared.share_count = 0;
+               shared.have_delayed_delete_refs = false;
                cond_resched();
        }
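
The use_cache handling added in this function comes down to one rule: the per-level cache describes a single path from the data extent up to the root, so if the very first step (level -1, the data extent itself) yields more than one leaf, there are multiple paths and the cache has to stay disabled for the rest of the walk. A hedged sketch of that decision, with the ulist node count reduced to a plain integer:

    #include <stdbool.h>

    /* level == -1 means we are still at the bytenr of the data extent. */
    static bool keep_using_cache(bool use_cache, int level, unsigned long nnodes)
    {
            if (level == -1 && nnodes > 1)
                    return false;   /* several leaves => several paths => no cache */
            return use_cache;
    }
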
 
index 52ae695..8e69584 100644 (file)
@@ -29,6 +29,7 @@ struct btrfs_backref_shared_cache {
         * a given data extent should never exceed the maximum b+tree height.
         */
        struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL];
+       bool use_cache;
 };
 
 typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
index 32c415c..deebc8d 100644 (file)
@@ -774,10 +774,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 
        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 out:
-       /* REVIEW */
        if (wait && caching_ctl)
                ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
-               /* wait_event(caching_ctl->wait, space_cache_v1_done(cache)); */
        if (caching_ctl)
                btrfs_put_caching_control(caching_ctl);
 
index a2da931..4b28263 100644 (file)
@@ -166,11 +166,9 @@ static bool btrfs_supported_super_csum(u16 csum_type)
  * Return 0 if the superblock checksum type matches the checksum value of that
  * algorithm. Pass the raw disk superblock data.
  */
-static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
-                                 char *raw_disk_sb)
+int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+                          const struct btrfs_super_block *disk_sb)
 {
-       struct btrfs_super_block *disk_sb =
-               (struct btrfs_super_block *)raw_disk_sb;
        char result[BTRFS_CSUM_SIZE];
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 
@@ -181,7 +179,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
         * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
         * filled with zeros and is included in the checksum.
         */
-       crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
+       crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
                            BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
 
        if (memcmp(disk_sb->csum, result, fs_info->csum_size))
@@ -3479,7 +3477,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
         * We want to check superblock checksum, the type is stored inside.
         * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
         */
-       if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) {
+       if (btrfs_check_super_csum(fs_info, disk_super)) {
                btrfs_err(fs_info, "superblock checksum mismatch");
                err = -EINVAL;
                btrfs_release_disk_super(disk_super);
index c67c15d..9fa923e 100644 (file)
@@ -42,6 +42,8 @@ struct extent_buffer *btrfs_find_create_tree_block(
 void btrfs_clean_tree_block(struct extent_buffer *buf);
 void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info);
 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
+int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+                          const struct btrfs_super_block *disk_sb);
 int __cold open_ctree(struct super_block *sb,
               struct btrfs_fs_devices *fs_devices,
               char *options);
index 1d4c239..fab7eb7 100644 (file)
@@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
 }
 
 struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
-                               u64 root_objectid, u32 generation,
+                               u64 root_objectid, u64 generation,
                                int check_generation)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
index f32f411..5afb7ca 100644 (file)
@@ -19,7 +19,7 @@ struct btrfs_fid {
 } __attribute__ ((packed));
 
 struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
-                               u64 root_objectid, u32 generation,
+                               u64 root_objectid, u64 generation,
                                int check_generation);
 struct dentry *btrfs_get_parent(struct dentry *child);
 
index 618275a..83cb037 100644 (file)
@@ -1641,16 +1641,17 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
        int err;
        u64 failed_start;
 
-       while (1) {
+       err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
+                              cached_state, NULL, GFP_NOFS);
+       while (err == -EEXIST) {
+               if (failed_start != start)
+                       clear_extent_bit(tree, start, failed_start - 1,
+                                        EXTENT_LOCKED, cached_state);
+
+               wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
                                       &failed_start, cached_state, NULL,
                                       GFP_NOFS);
-               if (err == -EEXIST) {
-                       wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
-                       start = failed_start;
-               } else
-                       break;
-               WARN_ON(start > end);
        }
        return err;
 }
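
The rewritten lock_extent() follows a classic try/partial-cleanup/wait/retry shape: attempt to set EXTENT_LOCKED over [start, end]; if someone already holds part of that range, release whatever prefix was locked, wait for the conflicting range to clear, then retry the whole range. A compile-only skeleton of that control flow, with the extent-bit primitives replaced by hypothetical helpers (they are stand-ins, not the kernel API):

    #include <errno.h>

    /* Hypothetical stand-ins for __set_extent_bit(), clear_extent_bit() and
     * wait_extent_bit(); only the control flow mirrors the change above. */
    int try_lock_range(unsigned long long start, unsigned long long end,
                       unsigned long long *failed_start);
    void unlock_range(unsigned long long start, unsigned long long end);
    void wait_range(unsigned long long start, unsigned long long end);

    static int lock_range(unsigned long long start, unsigned long long end)
    {
            unsigned long long failed_start;
            int err;

            err = try_lock_range(start, end, &failed_start);
            while (err == -EEXIST) {
                    /* Drop the prefix we managed to lock before the conflict... */
                    if (failed_start != start)
                            unlock_range(start, failed_start - 1);
                    /* ...wait for the holder, then retry the full range. */
                    wait_range(failed_start, end);
                    err = try_lock_range(start, end, &failed_start);
            }
            return err;
    }
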
index cd2d365..2801c99 100644 (file)
@@ -3295,21 +3295,22 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                }
 
                /*
-                * If this is a leaf and there are tree mod log users, we may
-                * have recorded mod log operations that point to this leaf.
-                * So we must make sure no one reuses this leaf's extent before
-                * mod log operations are applied to a node, otherwise after
-                * rewinding a node using the mod log operations we get an
-                * inconsistent btree, as the leaf's extent may now be used as
-                * a node or leaf for another different btree.
+                * If there are tree mod log users we may have recorded mod log
+                * operations for this node.  If we re-allocate this node we
+                * could replay operations on this node that happened when it
+                * existed in a completely different root.  For example if it
+                * was part of root A, then was reallocated to root B, and we
+                * are doing a btrfs_old_search_slot(root b), we could replay
+                * operations that happened when the block was part of root A,
+                * giving us an inconsistent view of the btree.
+                *
                 * We are safe from races here because at this point no other
                 * node or root points to this extent buffer, so if after this
-                * check a new tree mod log user joins, it will not be able to
-                * find a node pointing to this leaf and record operations that
-                * point to this leaf.
+                * check a new tree mod log user joins we will not have an
+                * existing log of operations on this node that we have to
+                * contend with.
                 */
-               if (btrfs_header_level(buf) == 0 &&
-                   test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
+               if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
                        must_pin = true;
 
                if (must_pin || btrfs_is_zoned(fs_info)) {
index f6395e8..82c8e99 100644 (file)
@@ -1632,10 +1632,8 @@ static int full_stripe_write(struct btrfs_raid_bio *rbio)
        int ret;
 
        ret = alloc_rbio_parity_pages(rbio);
-       if (ret) {
-               __free_raid_bio(rbio);
+       if (ret)
                return ret;
-       }
 
        ret = lock_stripe_add(rbio);
        if (ret == 0)
@@ -1823,8 +1821,10 @@ void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
         */
        if (rbio_is_full(rbio)) {
                ret = full_stripe_write(rbio);
-               if (ret)
+               if (ret) {
+                       __free_raid_bio(rbio);
                        goto fail;
+               }
                return;
        }
 
@@ -1838,8 +1838,10 @@ void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
                list_add_tail(&rbio->plug_list, &plug->rbio_list);
        } else {
                ret = __raid56_parity_write(rbio);
-               if (ret)
+               if (ret) {
+                       __free_raid_bio(rbio);
                        goto fail;
+               }
        }
 
        return;
@@ -2742,8 +2744,10 @@ raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
 
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
-               BUG();
-               kfree(rbio);
+               btrfs_warn_rl(fs_info,
+       "can not determine the failed stripe number for full stripe %llu",
+                             bioc->raid_map[0]);
+               __free_raid_bio(rbio);
                return NULL;
        }
 
index 4ef4167..145c84b 100644 (file)
@@ -348,6 +348,7 @@ static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
        switch (sctx->proto) {
        case 1:  return cmd <= BTRFS_SEND_C_MAX_V1;
        case 2:  return cmd <= BTRFS_SEND_C_MAX_V2;
+       case 3:  return cmd <= BTRFS_SEND_C_MAX_V3;
        default: return false;
        }
 }
@@ -6469,7 +6470,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
                if (ret < 0)
                        goto out;
        }
-       if (sctx->cur_inode_needs_verity) {
+
+       if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY)
+           && sctx->cur_inode_needs_verity) {
                ret = process_verity(sctx);
                if (ret < 0)
                        goto out;
@@ -6665,17 +6668,19 @@ static int changed_inode(struct send_ctx *sctx,
                        /*
                         * First, process the inode as if it was deleted.
                         */
-                       sctx->cur_inode_gen = right_gen;
-                       sctx->cur_inode_new = false;
-                       sctx->cur_inode_deleted = true;
-                       sctx->cur_inode_size = btrfs_inode_size(
-                                       sctx->right_path->nodes[0], right_ii);
-                       sctx->cur_inode_mode = btrfs_inode_mode(
-                                       sctx->right_path->nodes[0], right_ii);
-                       ret = process_all_refs(sctx,
-                                       BTRFS_COMPARE_TREE_DELETED);
-                       if (ret < 0)
-                               goto out;
+                       if (old_nlinks > 0) {
+                               sctx->cur_inode_gen = right_gen;
+                               sctx->cur_inode_new = false;
+                               sctx->cur_inode_deleted = true;
+                               sctx->cur_inode_size = btrfs_inode_size(
+                                               sctx->right_path->nodes[0], right_ii);
+                               sctx->cur_inode_mode = btrfs_inode_mode(
+                                               sctx->right_path->nodes[0], right_ii);
+                               ret = process_all_refs(sctx,
+                                               BTRFS_COMPARE_TREE_DELETED);
+                               if (ret < 0)
+                                       goto out;
+                       }
 
                        /*
                         * Now process the inode as if it was new.
index 0a45377..f7585cf 100644 (file)
 #include <linux/types.h>
 
 #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
+/* Conditional support for the upcoming protocol version. */
+#ifdef CONFIG_BTRFS_DEBUG
+#define BTRFS_SEND_STREAM_VERSION 3
+#else
 #define BTRFS_SEND_STREAM_VERSION 2
+#endif
 
 /*
  * In send stream v1, no command is larger than 64K. In send stream v2, no limit
index 9be4fd2..5942b93 100644 (file)
@@ -2555,6 +2555,7 @@ static int check_dev_super(struct btrfs_device *dev)
 {
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct btrfs_super_block *sb;
+       u16 csum_type;
        int ret = 0;
 
        /* This should be called with fs still frozen. */
@@ -2569,6 +2570,21 @@ static int check_dev_super(struct btrfs_device *dev)
        if (IS_ERR(sb))
                return PTR_ERR(sb);
 
+       /* Verify the checksum. */
+       csum_type = btrfs_super_csum_type(sb);
+       if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
+               btrfs_err(fs_info, "csum type changed, has %u expect %u",
+                         csum_type, btrfs_super_csum_type(fs_info->super_copy));
+               ret = -EUCLEAN;
+               goto out;
+       }
+
+       if (btrfs_check_super_csum(fs_info, sb)) {
+               btrfs_err(fs_info, "csum for on-disk super block no longer matches");
+               ret = -EUCLEAN;
+               goto out;
+       }
+
        /* Btrfs_validate_super() includes fsid check against super->fsid. */
        ret = btrfs_validate_super(fs_info, sb, 0);
        if (ret < 0)
index 94ba46d..a8d4bc6 100644 (file)
@@ -7142,6 +7142,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
        u64 devid;
        u64 type;
        u8 uuid[BTRFS_UUID_SIZE];
+       int index;
        int num_stripes;
        int ret;
        int i;
@@ -7149,6 +7150,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        type = btrfs_chunk_type(leaf, chunk);
+       index = btrfs_bg_flags_to_raid_index(type);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
 
 #if BITS_PER_LONG == 32
@@ -7202,7 +7204,15 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = type;
-       map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+       /*
+        * We can't use the sub_stripes value, as for profiles other than
+        * RAID10, they may have 0 as sub_stripes for filesystems created by
+        * older mkfs (<v5.4).
+        * In that case, it can cause divide-by-zero errors later.
+        * Since currently sub_stripes is fixed for each profile, let's
+        * use the trusted value instead.
+        */
+       map->sub_stripes = btrfs_raid_array[index].sub_stripes;
        map->verified_stripes = 0;
        em->orig_block_len = btrfs_calc_stripe_length(em);
        for (i = 0; i < num_stripes; i++) {
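
The comment above is the whole story: the on-disk sub_stripes field may be 0 on filesystems written by old mkfs, and later math divides by it, so the value is taken from the trusted per-profile table instead. A toy version of that defensive lookup (profile names and values here are only a model, not the kernel's btrfs_raid_array):

    #include <stdio.h>

    enum raid_profile_model { PROFILE_SINGLE, PROFILE_RAID1, PROFILE_RAID10,
                              PROFILE_COUNT };

    /* Trusted, compile-time sub_stripes per profile (model values). */
    static const int trusted_sub_stripes[PROFILE_COUNT] = {
            [PROFILE_SINGLE] = 1,
            [PROFILE_RAID1]  = 1,
            [PROFILE_RAID10] = 2,
    };

    static int pick_sub_stripes(enum raid_profile_model profile, int on_disk_value)
    {
            /* Ignore on_disk_value: it may be 0 on very old filesystems. */
            (void)on_disk_value;
            return trusted_sub_stripes[profile];
    }

    int main(void)
    {
            printf("%d\n", pick_sub_stripes(PROFILE_SINGLE, 0));    /* 1, never 0 */
            return 0;
    }
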
index 599b9d5..f8b668d 100644 (file)
@@ -395,6 +395,7 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
  */
 struct btrfs_bio {
        unsigned int mirror_num;
+       struct bvec_iter iter;
 
        /* for direct I/O */
        u64 file_offset;
@@ -403,7 +404,6 @@ struct btrfs_bio {
        struct btrfs_device *device;
        u8 *csum;
        u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
-       struct bvec_iter iter;
 
        /* End I/O information supplied to btrfs_bio_alloc */
        btrfs_bio_end_io_t end_io;
index fe88b67..6039908 100644 (file)
@@ -253,8 +253,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                dentry = dget(cifs_sb->root);
        else {
                dentry = path_to_dentry(cifs_sb, path);
-               if (IS_ERR(dentry))
+               if (IS_ERR(dentry)) {
+                       rc = -ENOENT;
                        goto oshr_free;
+               }
        }
        cfid->dentry = dentry;
        cfid->tcon = tcon;
@@ -338,6 +340,27 @@ smb2_close_cached_fid(struct kref *ref)
        free_cached_dir(cfid);
 }
 
+void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
+                            const char *name, struct cifs_sb_info *cifs_sb)
+{
+       struct cached_fid *cfid = NULL;
+       int rc;
+
+       rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
+       if (rc) {
+               cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
+               return;
+       }
+       spin_lock(&cfid->cfids->cfid_list_lock);
+       if (cfid->has_lease) {
+               cfid->has_lease = false;
+               kref_put(&cfid->refcount, smb2_close_cached_fid);
+       }
+       spin_unlock(&cfid->cfids->cfid_list_lock);
+       close_cached_dir(cfid);
+}
+
+
 void close_cached_dir(struct cached_fid *cfid)
 {
        kref_put(&cfid->refcount, smb2_close_cached_fid);
@@ -378,22 +401,20 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 {
        struct cached_fids *cfids = tcon->cfids;
        struct cached_fid *cfid, *q;
-       struct list_head entry;
+       LIST_HEAD(entry);
 
-       INIT_LIST_HEAD(&entry);
        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-               list_del(&cfid->entry);
-               list_add(&cfid->entry, &entry);
+               list_move(&cfid->entry, &entry);
                cfids->num_entries--;
                cfid->is_open = false;
+               cfid->on_list = false;
                /* To prevent race with smb2_cached_lease_break() */
                kref_get(&cfid->refcount);
        }
        spin_unlock(&cfids->cfid_list_lock);
 
        list_for_each_entry_safe(cfid, q, &entry, entry) {
-               cfid->on_list = false;
                list_del(&cfid->entry);
                cancel_work_sync(&cfid->lease_break);
                if (cfid->has_lease) {
@@ -518,15 +539,13 @@ struct cached_fids *init_cached_dirs(void)
 void free_cached_dirs(struct cached_fids *cfids)
 {
        struct cached_fid *cfid, *q;
-       struct list_head entry;
+       LIST_HEAD(entry);
 
-       INIT_LIST_HEAD(&entry);
        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
                cfid->on_list = false;
                cfid->is_open = false;
-               list_del(&cfid->entry);
-               list_add(&cfid->entry, &entry);
+               list_move(&cfid->entry, &entry);
        }
        spin_unlock(&cfids->cfid_list_lock);
 
index e536304..2f4e764 100644 (file)
@@ -69,6 +69,10 @@ extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
                                     struct dentry *dentry,
                                     struct cached_fid **cfid);
 extern void close_cached_dir(struct cached_fid *cfid);
+extern void drop_cached_dir_by_name(const unsigned int xid,
+                                   struct cifs_tcon *tcon,
+                                   const char *name,
+                                   struct cifs_sb_info *cifs_sb);
 extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
 extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
 extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
index c6ac192..d0b9fec 100644 (file)
@@ -1302,8 +1302,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
        ssize_t rc;
        struct cifsFileInfo *cfile = dst_file->private_data;
 
-       if (cfile->swapfile)
-               return -EOPNOTSUPP;
+       if (cfile->swapfile) {
+               rc = -EOPNOTSUPP;
+               free_xid(xid);
+               return rc;
+       }
 
        rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
                                        len, flags);
index 5b4a7a3..388b745 100644 (file)
@@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
 /* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 39
-#define CIFS_VERSION   "2.39"
+#define SMB3_PRODUCT_BUILD 40
+#define CIFS_VERSION   "2.40"
 #endif                         /* _CIFSFS_H */
index ffb2915..1cc47dd 100644 (file)
@@ -1584,6 +1584,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
        server->session_key.response = NULL;
        server->session_key.len = 0;
        kfree(server->hostname);
+       server->hostname = NULL;
 
        task = xchg(&server->tsk, NULL);
        if (task)
index a5c73c2..8b1c371 100644 (file)
@@ -543,8 +543,10 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
        cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
                 inode, direntry, direntry);
 
-       if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
-               return -EIO;
+       if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) {
+               rc = -EIO;
+               goto out_free_xid;
+       }
 
        tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
        rc = PTR_ERR(tlink);
index f6ffee5..cd96982 100644 (file)
@@ -1885,11 +1885,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
        struct cifsFileInfo *cfile;
        __u32 type;
 
-       rc = -EACCES;
        xid = get_xid();
 
-       if (!(fl->fl_flags & FL_FLOCK))
-               return -ENOLCK;
+       if (!(fl->fl_flags & FL_FLOCK)) {
+               rc = -ENOLCK;
+               free_xid(xid);
+               return rc;
+       }
 
        cfile = (struct cifsFileInfo *)file->private_data;
        tcon = tlink_tcon(cfile->tlink);
@@ -1908,8 +1910,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
                 * if no lock or unlock then nothing to do since we do not
                 * know what it is
                 */
+               rc = -EOPNOTSUPP;
                free_xid(xid);
-               return -EOPNOTSUPP;
+               return rc;
        }
 
        rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
@@ -2431,12 +2434,16 @@ cifs_writev_complete(struct work_struct *work)
 struct cifs_writedata *
 cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
 {
+       struct cifs_writedata *writedata = NULL;
        struct page **pages =
                kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
-       if (pages)
-               return cifs_writedata_direct_alloc(pages, complete);
+       if (pages) {
+               writedata = cifs_writedata_direct_alloc(pages, complete);
+               if (!writedata)
+                       kvfree(pages);
+       }
 
-       return NULL;
+       return writedata;
 }
 
 struct cifs_writedata *
@@ -3296,6 +3303,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                                             cifs_uncached_writev_complete);
                        if (!wdata) {
                                rc = -ENOMEM;
+                               for (i = 0; i < nr_pages; i++)
+                                       put_page(pagevec[i]);
+                               kvfree(pagevec);
                                add_credits_and_wake_if(server, credits, 0);
                                break;
                        }
index 7cf96e5..9bde08d 100644 (file)
@@ -368,8 +368,10 @@ cifs_get_file_info_unix(struct file *filp)
 
        if (cfile->symlink_target) {
                fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
-               if (!fattr.cf_symlink_target)
-                       return -ENOMEM;
+               if (!fattr.cf_symlink_target) {
+                       rc = -ENOMEM;
+                       goto cifs_gfiunix_out;
+               }
        }
 
        rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data);
index 0435d1d..92e4278 100644 (file)
@@ -496,6 +496,7 @@ out:
                cifs_put_tcp_session(chan->server, 0);
        }
 
+       free_xid(xid);
        return rc;
 }
 
index a6640e6..68e08c8 100644 (file)
@@ -655,6 +655,7 @@ int
 smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
           struct cifs_sb_info *cifs_sb)
 {
+       drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
        return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
                                CREATE_NOT_FILE, ACL_NO_MODE,
                                NULL, SMB2_OP_RMDIR, NULL, NULL, NULL);
@@ -698,6 +699,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct cifsFileInfo *cfile;
 
+       drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
        cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
 
        return smb2_set_path_attr(xid, tcon, from_name, to_name,
index 17b2515..4f53fa0 100644 (file)
@@ -530,6 +530,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
        p = buf;
 
        spin_lock(&ses->iface_lock);
+       ses->iface_count = 0;
        /*
         * Go through iface_list and do kref_put to remove
         * any unused ifaces. ifaces in use will be removed
@@ -651,9 +652,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                        kref_put(&iface->refcount, release_iface);
                } else
                        list_add_tail(&info->iface_head, &ses->iface_list);
-               spin_unlock(&ses->iface_lock);
 
                ses->iface_count++;
+               spin_unlock(&ses->iface_lock);
                ses->iface_last_update = jiffies;
 next_iface:
                nb_iface++;
index a238450..a569574 100644 (file)
@@ -1341,14 +1341,13 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
 static void
 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
 {
-       int i;
+       struct kvec *iov = sess_data->iov;
 
-       /* zero the session data before freeing, as it might contain sensitive info (keys, etc) */
-       for (i = 0; i < 2; i++)
-               if (sess_data->iov[i].iov_base)
-                       memzero_explicit(sess_data->iov[i].iov_base, sess_data->iov[i].iov_len);
+       /* iov[1] is already freed by caller */
+       if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
+               memzero_explicit(iov[0].iov_base, iov[0].iov_len);
 
-       free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
+       free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
        sess_data->buf0_type = CIFS_NO_BUFFER;
 }
 
@@ -1531,7 +1530,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
                                          &blob_length, ses, server,
                                          sess_data->nls_cp);
        if (rc)
-               goto out_err;
+               goto out;
 
        if (use_spnego) {
                /* BB eventually need to add this */
@@ -1578,7 +1577,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
        }
 
 out:
-       memzero_explicit(ntlmssp_blob, blob_length);
+       kfree_sensitive(ntlmssp_blob);
        SMB2_sess_free_buffer(sess_data);
        if (!rc) {
                sess_data->result = 0;
@@ -1662,7 +1661,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
        }
 #endif
 out:
-       memzero_explicit(ntlmssp_blob, blob_length);
+       kfree_sensitive(ntlmssp_blob);
        SMB2_sess_free_buffer(sess_data);
        kfree_sensitive(ses->ntlmssp);
        ses->ntlmssp = NULL;
index 1cca09a..2a24b1f 100644 (file)
@@ -205,14 +205,19 @@ static int allocate_filesystem_keyring(struct super_block *sb)
 }
 
 /*
- * This is called at unmount time to release all encryption keys that have been
- * added to the filesystem, along with the keyring that contains them.
+ * Release all encryption keys that have been added to the filesystem, along
+ * with the keyring that contains them.
  *
- * Note that besides clearing and freeing memory, this might need to evict keys
- * from the keyslots of an inline crypto engine.  Therefore, this must be called
- * while the filesystem's underlying block device(s) are still available.
+ * This is called at unmount time.  The filesystem's underlying block device(s)
+ * are still available at this time; this is important because after user file
+ * accesses have been allowed, this function may need to evict keys from the
+ * keyslots of an inline crypto engine, which requires the block device(s).
+ *
+ * This is also called when the super_block is being freed.  This is needed to
+ * avoid a memory leak if mounting fails after the "test_dummy_encryption"
+ * option was processed, as in that case the unmount-time call isn't made.
  */
-void fscrypt_sb_delete(struct super_block *sb)
+void fscrypt_destroy_keyring(struct super_block *sb)
 {
        struct fscrypt_keyring *keyring = sb->s_master_keys;
        size_t i;
index a0ef63c..9e4f478 100644 (file)
@@ -651,22 +651,6 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
        if (err)
                return err;
 
-       /*
-        * Ensure that the available space hasn't shrunk below the safe level
-        */
-       status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
-       if (status != EFI_SUCCESS) {
-               if (status != EFI_UNSUPPORTED) {
-                       err = efi_status_to_err(status);
-                       goto out;
-               }
-
-               if (*size > 65536) {
-                       err = -ENOSPC;
-                       goto out;
-               }
-       }
-
        status = efivar_set_variable_locked(name, vendor, attributes, *size,
                                            data, false);
        if (status != EFI_SUCCESS) {
index 998cd26..fe05bc5 100644 (file)
@@ -590,14 +590,17 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
        struct super_block *psb = erofs_pseudo_mnt->mnt_sb;
 
        mutex_lock(&erofs_domain_cookies_lock);
+       spin_lock(&psb->s_inode_list_lock);
        list_for_each_entry(inode, &psb->s_inodes, i_sb_list) {
                ctx = inode->i_private;
                if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
                        continue;
                igrab(inode);
+               spin_unlock(&psb->s_inode_list_lock);
                mutex_unlock(&erofs_domain_cookies_lock);
                return ctx;
        }
+       spin_unlock(&psb->s_inode_list_lock);
        ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode);
        mutex_unlock(&erofs_domain_cookies_lock);
        return ctx;
index 559380a..c7f24fc 100644 (file)
@@ -813,15 +813,14 @@ retry:
        ++spiltted;
        if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
                fe->pcl->multibases = true;
-
-       if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
-           !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
-           fe->pcl->length == map->m_llen)
-               fe->pcl->partial = false;
        if (fe->pcl->length < offset + end - map->m_la) {
                fe->pcl->length = offset + end - map->m_la;
                fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
        }
+       if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
+           !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
+           fe->pcl->length == map->m_llen)
+               fe->pcl->partial = false;
 next_part:
        /* shorten the remaining extent to update progress */
        map->m_llen = offset + cur - map->m_la;
@@ -888,15 +887,13 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
 
        if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
                unsigned int pgnr;
-               struct page *oldpage;
 
                pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
                DBG_BUGON(pgnr >= be->nr_pages);
-               oldpage = be->decompressed_pages[pgnr];
-               be->decompressed_pages[pgnr] = bvec->page;
-
-               if (!oldpage)
+               if (!be->decompressed_pages[pgnr]) {
+                       be->decompressed_pages[pgnr] = bvec->page;
                        return;
+               }
        }
 
        /* (cold path) one pcluster is requested multiple times */
index e7f04c4..d98c952 100644 (file)
@@ -126,10 +126,10 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 }
 
 /*
- * bit 31: I/O error occurred on this page
- * bit 0 - 30: remaining parts to complete this page
+ * bit 30: I/O error occurred on this page
+ * bit 0 - 29: remaining parts to complete this page
  */
-#define Z_EROFS_PAGE_EIO                       (1 << 31)
+#define Z_EROFS_PAGE_EIO                       (1 << 30)
 
 static inline void z_erofs_onlinepage_init(struct page *page)
 {
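
Per the updated comment, the per-page online value packs an error flag (now bit 30) next to a counter of remaining parts (bits 0-29). A standalone model of that packing, ignoring the atomicity the real code needs:

    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_PAGE_EIO          (1u << 30)              /* error flag */
    #define MODEL_COUNT_MASK        (MODEL_PAGE_EIO - 1)    /* bits 0-29: parts left */

    static unsigned int mark_part_done(unsigned int v, bool failed)
    {
            if (failed)
                    v |= MODEL_PAGE_EIO;
            return (v & MODEL_PAGE_EIO) | ((v & MODEL_COUNT_MASK) - 1);
    }

    int main(void)
    {
            unsigned int v = 3;     /* three parts outstanding, no error yet */

            v = mark_part_done(v, false);
            v = mark_part_done(v, true);
            v = mark_part_done(v, false);
            printf("remaining=%u error=%d\n",
                   v & MODEL_COUNT_MASK, !!(v & MODEL_PAGE_EIO));
            return 0;
    }
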
index 44c27ef..0bb6692 100644 (file)
@@ -57,8 +57,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 
        pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
                    vi->xattr_isize, 8);
-       kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
-                                  EROFS_KMAP_ATOMIC);
+       kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
        if (IS_ERR(kaddr)) {
                err = PTR_ERR(kaddr);
                goto out_unlock;
@@ -73,7 +72,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
                vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
                vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
                vi->z_tailextent_headlcn = 0;
-               goto unmap_done;
+               goto done;
        }
        vi->z_advise = le16_to_cpu(h->h_advise);
        vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
@@ -85,7 +84,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
                erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
                          headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
                err = -EOPNOTSUPP;
-               goto unmap_done;
+               goto out_put_metabuf;
        }
 
        vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
@@ -95,7 +94,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
                erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
-               goto unmap_done;
+               goto out_put_metabuf;
        }
        if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
@@ -103,12 +102,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
                erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
-               goto unmap_done;
+               goto out_put_metabuf;
        }
-unmap_done:
-       erofs_put_metabuf(&buf);
-       if (err)
-               goto out_unlock;
 
        if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
                struct erofs_map_blocks map = {
@@ -127,7 +122,7 @@ unmap_done:
                        err = -EFSCORRUPTED;
                }
                if (err < 0)
-                       goto out_unlock;
+                       goto out_put_metabuf;
        }
 
        if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
@@ -141,11 +136,14 @@ unmap_done:
                                            EROFS_GET_BLOCKS_FINDTAIL);
                erofs_put_metabuf(&map.buf);
                if (err < 0)
-                       goto out_unlock;
+                       goto out_put_metabuf;
        }
+done:
        /* paired with smp_mb() at the beginning of the function */
        smp_mb();
        set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+out_put_metabuf:
+       erofs_put_metabuf(&buf);
 out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
        return err;
index 349a5da..a0b1f03 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1012,7 +1012,6 @@ static int exec_mmap(struct mm_struct *mm)
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
-       lru_gen_add_mm(mm);
        /*
         * This prevents preemption while active_mm is being loaded and
         * it and mm are being updated, which could cause problems for
@@ -1025,6 +1024,7 @@ static int exec_mmap(struct mm_struct *mm)
        activate_mm(active_mm, mm);
        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
+       lru_gen_add_mm(mm);
        task_unlock(tsk);
        lru_gen_use_mm(mm);
        if (old_mm) {
@@ -1197,11 +1197,11 @@ static int unshare_sighand(struct task_struct *me)
                        return -ENOMEM;
 
                refcount_set(&newsighand->count, 1);
-               memcpy(newsighand->action, oldsighand->action,
-                      sizeof(newsighand->action));
 
                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
+               memcpy(newsighand->action, oldsighand->action,
+                      sizeof(newsighand->action));
                rcu_assign_pointer(me->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);
index 989365b..7950904 100644 (file)
@@ -1741,10 +1741,6 @@ static const struct fs_parameter_spec ext4_param_specs[] = {
 
 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
 
-static const char deprecated_msg[] =
-       "Mount option \"%s\" will be removed by %s\n"
-       "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
-
 #define MOPT_SET       0x0001
 #define MOPT_CLEAR     0x0002
 #define MOPT_NOSUPPORT 0x0004
index 6a29bcf..dc74a94 100644 (file)
@@ -1458,12 +1458,14 @@ static __net_init int nfsd_init_net(struct net *net)
                goto out_drc_error;
        retval = nfsd_reply_cache_init(nn);
        if (retval)
-               goto out_drc_error;
+               goto out_cache_error;
        get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
        seqlock_init(&nn->writeverf_lock);
 
        return 0;
 
+out_cache_error:
+       nfsd4_leases_net_shutdown(nn);
 out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
index d734342..8c52b6c 100644 (file)
@@ -392,8 +392,8 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
 skip_pseudoflavor_check:
        /* Finally, check access permissions. */
        error = nfsd_permission(rqstp, exp, dentry, access);
-       trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
 out:
+       trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
        if (error == nfserr_stale)
                nfsd_stats_fh_stale_inc(exp);
        return error;
index 961d1cf..05f3298 100644 (file)
@@ -232,6 +232,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
        handle_t *handle = NULL;
        struct ocfs2_super *osb;
        struct ocfs2_dinode *dirfe;
+       struct ocfs2_dinode *fe = NULL;
        struct buffer_head *new_fe_bh = NULL;
        struct inode *inode = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
@@ -382,6 +383,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
                goto leave;
        }
 
+       fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
        if (S_ISDIR(mode)) {
                status = ocfs2_fill_new_dir(osb, handle, dir, inode,
                                            new_fe_bh, data_ac, meta_ac);
@@ -454,8 +456,11 @@ roll_back:
 leave:
        if (status < 0 && did_quota_inode)
                dquot_free_inode(inode);
-       if (handle)
+       if (handle) {
+               if (status < 0 && fe)
+                       ocfs2_set_links_count(fe, 0);
                ocfs2_commit_trans(osb, handle);
+       }
 
        ocfs2_inode_unlock(dir, 1);
        if (did_block_signals)
@@ -632,18 +637,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
                return status;
        }
 
-       status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
                                    parent_fe_bh, handle, inode_ac,
                                    fe_blkno, suballoc_loc, suballoc_bit);
-       if (status < 0) {
-               u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
-               int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
-                               inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
-               if (tmp)
-                       mlog_errno(tmp);
-       }
-
-       return status;
 }
 
 static int ocfs2_mkdir(struct user_namespace *mnt_userns,
@@ -2028,8 +2024,11 @@ bail:
                                        ocfs2_clusters_to_bytes(osb->sb, 1));
        if (status < 0 && did_quota_inode)
                dquot_free_inode(inode);
-       if (handle)
+       if (handle) {
+               if (status < 0 && fe)
+                       ocfs2_set_links_count(fe, 0);
                ocfs2_commit_trans(osb, handle);
+       }
 
        ocfs2_inode_unlock(dir, 1);
        if (did_block_signals)
index 8b4f307..8a74cdc 100644 (file)
@@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                goto out_put_mm;
 
        hold_task_mempolicy(priv);
-       vma = mas_find(&mas, 0);
+       vma = mas_find(&mas, ULONG_MAX);
 
        if (unlikely(!vma))
                goto empty_set;
index e565109..8ba8c4c 100644 (file)
@@ -506,8 +506,9 @@ static int squashfs_readahead_fragment(struct page **page,
                squashfs_i(inode)->fragment_size);
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+       int error = buffer->error;
 
-       if (buffer->error)
+       if (error)
                goto out;
 
        expected += squashfs_i(inode)->fragment_offset;
@@ -529,7 +530,7 @@ static int squashfs_readahead_fragment(struct page **page,
 
 out:
        squashfs_cache_put(buffer);
-       return buffer->error;
+       return error;
 }
 
 static void squashfs_readahead(struct readahead_control *ractl)
@@ -557,6 +558,13 @@ static void squashfs_readahead(struct readahead_control *ractl)
                int res, bsize;
                u64 block = 0;
                unsigned int expected;
+               struct page *last_page;
+
+               expected = start >> msblk->block_log == file_end ?
+                          (i_size_read(inode) & (msblk->block_size - 1)) :
+                           msblk->block_size;
+
+               max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
                nr_pages = __readahead_batch(ractl, pages, max_pages);
                if (!nr_pages)
@@ -566,13 +574,10 @@ static void squashfs_readahead(struct readahead_control *ractl)
                        goto skip_pages;
 
                index = pages[0]->index >> shift;
+
                if ((pages[nr_pages - 1]->index >> shift) != index)
                        goto skip_pages;
 
-               expected = index == file_end ?
-                          (i_size_read(inode) & (msblk->block_size - 1)) :
-                           msblk->block_size;
-
                if (index == file_end && squashfs_i(inode)->fragment_block !=
                                                SQUASHFS_INVALID_BLK) {
                        res = squashfs_readahead_fragment(pages, nr_pages,
@@ -593,15 +598,15 @@ static void squashfs_readahead(struct readahead_control *ractl)
 
                res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-               squashfs_page_actor_free(actor);
+               last_page = squashfs_page_actor_free(actor);
 
                if (res == expected) {
                        int bytes;
 
                        /* Last page (if present) may have trailing bytes not filled */
                        bytes = res % PAGE_SIZE;
-                       if (pages[nr_pages - 1]->index == file_end && bytes)
-                               memzero_page(pages[nr_pages - 1], bytes,
+                       if (index == file_end && bytes && last_page)
+                               memzero_page(last_page, bytes,
                                             PAGE_SIZE - bytes);
 
                        for (i = 0; i < nr_pages; i++) {
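
The reshuffled readahead loop computes the expected data size for the target block before grabbing pages, so the page batch can be capped to exactly what that block can fill: the last block of a file holds i_size modulo the block size, every other block holds a full block. A small standalone model of that arithmetic (block and page sizes are just example values; the block size is a power of two, as in squashfs):

    #include <stdio.h>

    static unsigned long expected_bytes(unsigned long long i_size,
                                        unsigned int block_size,
                                        unsigned long long block_index,
                                        unsigned long long last_block_index)
    {
            if (block_index == last_block_index)
                    return i_size & (block_size - 1);       /* tail of the file */
            return block_size;                              /* full block */
    }

    int main(void)
    {
            const unsigned int block_size = 128 * 1024, page_size = 4096;
            unsigned long exp = expected_bytes(200000, block_size, 1, 1);
            unsigned long max_pages = (exp + page_size - 1) / page_size;

            printf("expected=%lu max_pages=%lu\n", exp, max_pages);
            return 0;
    }
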
index 54b93bf..81af6c4 100644 (file)
@@ -71,11 +71,13 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
                        (actor->next_index != actor->page[actor->next_page]->index)) {
                actor->next_index++;
                actor->returned_pages++;
+               actor->last_page = NULL;
                return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
        }
 
        actor->next_index++;
        actor->returned_pages++;
+       actor->last_page = actor->page[actor->next_page];
        return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
 }
 
@@ -125,6 +127,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
        actor->returned_pages = 0;
        actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
        actor->pageaddr = NULL;
+       actor->last_page = NULL;
        actor->alloc_buffer = msblk->decompressor->alloc_buffer;
        actor->squashfs_first_page = direct_first_page;
        actor->squashfs_next_page = direct_next_page;
index 95ffbb5..97d4983 100644 (file)
@@ -16,6 +16,7 @@ struct squashfs_page_actor {
        void    *(*squashfs_first_page)(struct squashfs_page_actor *);
        void    *(*squashfs_next_page)(struct squashfs_page_actor *);
        void    (*squashfs_finish_page)(struct squashfs_page_actor *);
+       struct page *last_page;
        int     pages;
        int     length;
        int     next_page;
@@ -29,10 +30,13 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
 extern struct squashfs_page_actor *squashfs_page_actor_init_special(
                                struct squashfs_sb_info *msblk,
                                struct page **page, int pages, int length);
-static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
 {
+       struct page *last_page = actor->last_page;
+
        kfree(actor->tmp_buffer);
        kfree(actor);
+       return last_page;
 }
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
index 6a82660..8d39e4f 100644 (file)
@@ -291,6 +291,7 @@ static void __put_super(struct super_block *s)
                WARN_ON(s->s_inode_lru.node);
                WARN_ON(!list_empty(&s->s_mounts));
                security_sb_free(s);
+               fscrypt_destroy_keyring(s);
                put_user_ns(s->s_user_ns);
                kfree(s->s_subtype);
                call_rcu(&s->rcu, destroy_super_rcu);
@@ -479,7 +480,7 @@ void generic_shutdown_super(struct super_block *sb)
                evict_inodes(sb);
                /* only nonzero refcount inodes can have marks */
                fsnotify_sb_delete(sb);
-               fscrypt_sb_delete(sb);
+               fscrypt_destroy_keyring(sb);
                security_sb_delete(sb);
 
                if (sb->s_dio_done_wq) {
index 34fb343..292a5c4 100644 (file)
@@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
 #endif
 
-int ghes_estatus_pool_init(int num_ghes);
+int ghes_estatus_pool_init(unsigned int num_ghes);
 
 /* From drivers/edac/ghes_edac.c */
 
index c15de16..d06ada2 100644 (file)
 #define PATCHABLE_DISCARDS     *(__patchable_function_entries)
 #endif
 
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define        FTRACE_STUB_HACK        ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 /*
  * The ftrace call sites are logged to a section whose name depends on the
  * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
  * dependencies for FTRACE_CALLSITE_SECTION's definition.
  *
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
- *
  * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
  * as some archs will have a different prototype for that function
  * but ftrace_ops_list_func() will have a single prototype.
                        KEEP(*(__mcount_loc))                   \
                        KEEP_PATCHABLE                          \
                        __stop_mcount_loc = .;                  \
-                       ftrace_stub_graph = ftrace_stub;        \
+                       FTRACE_STUB_HACK                        \
                        ftrace_ops_list_func = arch_ftrace_ops_list_func;
 #else
 # ifdef CONFIG_FUNCTION_TRACER
-#  define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;        \
+#  define MCOUNT_REC() FTRACE_STUB_HACK                        \
                        ftrace_ops_list_func = arch_ftrace_ops_list_func;
 # else
 #  define MCOUNT_REC()
index 599855c..2ae4fd6 100644 (file)
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE  DMA_FENCE_FLAG_USER_BITS
+
 struct drm_gem_object;
 
 struct drm_gpu_scheduler;
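
DMA_FENCE_FLAG_USER_BITS is the first fence flag bit reserved for fence users, so the new define simply gives that bit a scheduler-specific name. A hedged, kernel-context sketch rather than standalone code, assuming fence points at the struct dma_fence a driver wants to exempt from pipelining:

    /* Illustrative only: force a CPU round trip before jobs that depend on
     * this fence are pushed to the hw queue. */
    set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
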
index ba18e9b..d6119c5 100644 (file)
@@ -853,7 +853,8 @@ static inline bool blk_mq_add_to_batch(struct request *req,
                                       struct io_comp_batch *iob, int ioerror,
                                       void (*complete)(struct io_comp_batch *))
 {
-       if (!iob || (req->rq_flags & RQF_ELV) || ioerror)
+       if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+                       (req->end_io && !blk_rq_is_passthrough(req)))
                return false;
 
        if (!iob->complete)
index 9e7d46d..0566705 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/bpfptr.h>
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
+#include <linux/init.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -970,6 +971,8 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+int __init bpf_arch_init_dispatcher_early(void *ip);
+
 #define BPF_DISPATCHER_INIT(_name) {                           \
        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
        .func = &_name##_func,                                  \
@@ -983,6 +986,13 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
        },                                                      \
 }
 
+#define BPF_DISPATCHER_INIT_CALL(_name)                                        \
+       static int __init _name##_init(void)                            \
+       {                                                               \
+               return bpf_arch_init_dispatcher_early(_name##_func);    \
+       }                                                               \
+       early_initcall(_name##_init)
+
 #ifdef CONFIG_X86_64
 #define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
 #else
@@ -1000,7 +1010,9 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
        }                                                               \
        EXPORT_SYMBOL(bpf_dispatcher_##name##_func);                    \
        struct bpf_dispatcher bpf_dispatcher_##name =                   \
-               BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+               BPF_DISPATCHER_INIT(bpf_dispatcher_##name);             \
+       BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+
 #define DECLARE_BPF_DISPATCHER(name)                                   \
        unsigned int bpf_dispatcher_##name##_func(                      \
                const void *ctx,                                        \
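
BPF_DISPATCHER_INIT_CALL() is pure token pasting: for each dispatcher name it emits a tiny __init function and registers it with early_initcall(), so the dispatcher's early setup hook runs during boot. A userspace model of that pattern, with the initcall registration replaced by an explicit call from main() (the dispatcher name is made up for illustration):

    #include <stdio.h>

    /* Model of the macro: generate <name>_init() for any dispatcher name. */
    #define MODEL_DISPATCHER_INIT_CALL(_name)                       \
            static int _name##_init(void)                           \
            {                                                       \
                    printf("init dispatcher: " #_name "\n");        \
                    return 0;                                       \
            }

    MODEL_DISPATCHER_INIT_CALL(xdp)

    int main(void)
    {
            /* The kernel would run this automatically via early_initcall(). */
            return xdp_init();
    }
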
index f2a9f22..528bd44 100644 (file)
@@ -106,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 
 struct cgroup *cgroup_get_from_path(const char *path);
 struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
 
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
index c41fa60..b637466 100644 (file)
@@ -542,11 +542,10 @@ struct counter_array {
 #define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
        DEFINE_COUNTER_ARRAY_U64(_name, _length)
 
-#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _enums, _length) \
-       DEFINE_COUNTER_AVAILABLE(_name##_available, _enums); \
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
        struct counter_array _name = { \
                .type = COUNTER_COMP_SIGNAL_POLARITY, \
-               .avail = &(_name##_available), \
+               .avail = &(_available), \
                .length = (_length), \
        }
 
index 50be7cb..b1b5720 100644 (file)
@@ -61,9 +61,9 @@ struct sk_buff;
 
 /* Special struct emulating a Ethernet header */
 struct qca_mgmt_ethhdr {
-       u32 command;            /* command bit 31:0 */
-       u32 seq;                /* seq 63:32 */
-       u32 mdio_data;          /* first 4byte mdio */
+       __le32 command;         /* command bit 31:0 */
+       __le32 seq;             /* seq 63:32 */
+       __le32 mdio_data;               /* first 4 bytes of mdio */
        __be16 hdr;             /* qca hdr */
 } __packed;
 
@@ -73,7 +73,7 @@ enum mdio_cmd {
 };
 
 struct mib_ethhdr {
-       u32 data[3];            /* first 3 mib counter */
+       __le32 data[3];         /* first 3 mib counter */
        __be16 hdr;             /* qca hdr */
 } __packed;
 
index da3974b..80f3c1c 100644 (file)
@@ -1085,9 +1085,6 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
 efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
                                 u32 attr, unsigned long data_size, void *data);
 
-efi_status_t check_var_size(u32 attributes, unsigned long size);
-efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size);
-
 #if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
 extern bool efi_capsule_pending(int *reset_type);
 
index 0aff76b..bcb8658 100644 (file)
@@ -555,7 +555,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
 
 #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) ||        \
        defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
-       defined(__arm__) || defined(__aarch64__)
+       defined(__arm__) || defined(__aarch64__) || defined(__mips__)
 
 #define fb_readb __raw_readb
 #define fb_readw __raw_readw
index 4029fe3..18a31b1 100644 (file)
@@ -43,11 +43,24 @@ extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
 extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
 extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
 #else
-#define __underlying_memchr    __builtin_memchr
-#define __underlying_memcmp    __builtin_memcmp
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy    __msan_memcpy
+#define __underlying_memmove   __msan_memmove
+#define __underlying_memset    __msan_memset
+#else
 #define __underlying_memcpy    __builtin_memcpy
 #define __underlying_memmove   __builtin_memmove
 #define __underlying_memset    __builtin_memset
+#endif
+
+#define __underlying_memchr    __builtin_memchr
+#define __underlying_memcmp    __builtin_memcmp
 #define __underlying_strcat    __builtin_strcat
 #define __underlying_strcpy    __builtin_strcpy
 #define __underlying_strlen    __builtin_strlen
index cad78b5..4f5f8a6 100644 (file)
@@ -307,7 +307,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
 }
 
 /* keyring.c */
-void fscrypt_sb_delete(struct super_block *sb);
+void fscrypt_destroy_keyring(struct super_block *sb);
 int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
 int fscrypt_add_test_dummy_key(struct super_block *sb,
                               const struct fscrypt_dummy_policy *dummy_policy);
@@ -521,7 +521,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
 }
 
 /* keyring.c */
-static inline void fscrypt_sb_delete(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
 {
 }
 
index a325532..3c9da1f 100644 (file)
@@ -455,7 +455,7 @@ extern void iommu_set_default_translated(bool cmd_line);
 extern bool iommu_default_passthrough(void);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
-                       enum iommu_resv_type type);
+                       enum iommu_resv_type type, gfp_t gfp);
 extern int iommu_get_group_resv_regions(struct iommu_group *group,
                                        struct list_head *head);
 
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644 (file)
index 0000000..7287da6
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
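
Illustrative sketch (not from the patch): the fortify-string hunk above selects the __msan_* helpers as the "underlying" mem* implementations when the translation unit is KMSAN-instrumented. A hypothetical caller therefore lands in one of two implementations; demo_copy() below is only an example.

#include <linux/string.h>

/* demo_copy() is hypothetical; only the underlying-helper selection is real. */
static inline void demo_copy(void *dst, const void *src, size_t len)
{
        /*
         * In a KMSAN-instrumented file the fortified memcpy() ends up in
         * __msan_memcpy(), which also propagates shadow/origin metadata;
         * otherwise it is the plain __builtin_memcpy().
         */
        memcpy(dst, src, len);
}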
index 32f259f..18592bd 100644 (file)
@@ -1240,8 +1240,18 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 /**
- * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
- *                             given guest physical address.
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc:          struct gfn_to_pfn_cache object.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks.  Note, the cache must
+ * be zero-allocated (or zeroed by the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ *                    physical address.
  *
  * @kvm:          pointer to kvm instance.
  * @gpc:          struct gfn_to_pfn_cache object.
@@ -1265,9 +1275,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
  * accessing the target page.
  */
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-                             gpa_t gpa, unsigned long len);
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+                    struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                    gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1324,7 +1334,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
 
 /**
- * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
  *
  * @kvm:          pointer to kvm instance.
  * @gpc:          struct gfn_to_pfn_cache object.
@@ -1332,7 +1342,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
  * This removes a cache from the @kvm's list to be processed on MMU notifier
  * invocation.
  */
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
 
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
@@ -1390,6 +1400,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap);
 long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                             unsigned long arg);
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
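
Illustrative lifecycle sketch of the renamed pfn-cache API, based only on the declarations above. The demo_*() helpers, the gpa argument and the KVM_GUEST_USES_PFN usage value are placeholders chosen for the example.

#include <linux/kvm_host.h>

static int demo_cache_setup(struct kvm *kvm, struct kvm_vcpu *vcpu,
                            struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
        /* One-time lock initialization; the cache must start out zeroed. */
        kvm_gpc_init(gpc);

        /* Map the guest page and link the cache into the VM's list. */
        return kvm_gpc_activate(kvm, gpc, vcpu, KVM_GUEST_USES_PFN,
                                gpa, PAGE_SIZE);
}

static void demo_cache_teardown(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
        /* Unmap and unlink; pairs with a successful kvm_gpc_activate(). */
        kvm_gpc_deactivate(kvm, gpc);
}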
index a12929b..af2ceb4 100644 (file)
@@ -970,7 +970,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
 struct mlx5_async_ctx {
        struct mlx5_core_dev *dev;
        atomic_t num_inflight;
-       struct wait_queue_head wait;
+       struct completion inflight_done;
 };
 
 struct mlx5_async_work;
index 711c359..18d942b 100644 (file)
@@ -41,6 +41,7 @@ struct net;
 #define SOCK_NOSPACE           2
 #define SOCK_PASSCRED          3
 #define SOCK_PASSSEC           4
+#define SOCK_SUPPORT_ZC                5
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
index a36edb0..eddf8ee 100644 (file)
@@ -3663,8 +3663,9 @@ static inline bool netif_attr_test_online(unsigned long j,
 static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
                                               unsigned int nr_bits)
 {
-       /* n is a prior cpu */
-       cpu_max_bits_warn(n + 1, nr_bits);
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpu_max_bits_warn(n, nr_bits);
 
        if (srcp)
                return find_next_bit(srcp, nr_bits, n + 1);
@@ -3685,8 +3686,9 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
                                          const unsigned long *src2p,
                                          unsigned int nr_bits)
 {
-       /* n is a prior cpu */
-       cpu_max_bits_warn(n + 1, nr_bits);
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpu_max_bits_warn(n, nr_bits);
 
        if (src1p && src2p)
                return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
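
Illustrative sketch of why -1 must remain a legal "prior index" for these helpers, modeled on the XPS-style callers; demo_walk() is hypothetical.

#include <linux/netdevice.h>
#include <linux/printk.h>

static void demo_walk(const unsigned long *mask, unsigned int nr_bits)
{
        unsigned int j;

        /* Start from "no previous index", i.e. n == -1, as the XPS code does. */
        for (j = netif_attrmask_next(-1, mask, nr_bits);
             j < nr_bits;
             j = netif_attrmask_next(j, mask, nr_bits))
                pr_info("attribute index %u is set\n", j);
}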
index 19dfdd7..1d3be1a 100644 (file)
@@ -51,8 +51,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        return unlikely(overflow);
 }
 
-/** check_add_overflow() - Calculate addition with overflow checking
- *
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
  * @a: first addend
  * @b: second addend
  * @d: pointer to store sum
@@ -66,8 +66,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
 #define check_add_overflow(a, b, d)    \
        __must_check_overflow(__builtin_add_overflow(a, b, d))
 
-/** check_sub_overflow() - Calculate subtraction with overflow checking
- *
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
  * @a: minuend; value to subtract from
  * @b: subtrahend; value to subtract from @a
  * @d: pointer to store difference
@@ -81,8 +81,8 @@ static inline bool __must_check __must_check_overflow(bool overflow)
 #define check_sub_overflow(a, b, d)    \
        __must_check_overflow(__builtin_sub_overflow(a, b, d))
 
-/** check_mul_overflow() - Calculate multiplication with overflow checking
- *
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
  * @a: first factor
  * @b: second factor
  * @d: pointer to store product
@@ -96,23 +96,24 @@ static inline bool __must_check __must_check_overflow(bool overflow)
 #define check_mul_overflow(a, b, d)    \
        __must_check_overflow(__builtin_mul_overflow(a, b, d))
 
-/** check_shl_overflow() - Calculate a left-shifted value and check overflow
- *
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
  * @a: Value to be shifted
  * @s: How many bits left to shift
  * @d: Pointer to where to store the result
  *
  * Computes *@d = (@a << @s)
  *
- * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
  * make sense. Example conditions:
- * - 'a << s' causes bits to be lost when stored in *d.
- * - 's' is garbage (e.g. negative) or so large that the result of
- *   'a << s' is guaranteed to be 0.
- * - 'a' is negative.
- * - 'a << s' sets the sign bit, if any, in '*d'.
  *
- * '*d' will hold the results of the attempted shift, but is not
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ *   '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
  * considered "safe for use" if true is returned.
  */
 #define check_shl_overflow(a, s, d) __must_check_overflow(({           \
@@ -129,7 +130,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
 
 /**
  * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
- *
  * @factor1: first factor
  * @factor2: second factor
  *
@@ -149,7 +149,6 @@ static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
 
 /**
  * size_add() - Calculate size_t addition with saturation at SIZE_MAX
- *
  * @addend1: first addend
  * @addend2: second addend
  *
@@ -169,7 +168,6 @@ static inline size_t __must_check size_add(size_t addend1, size_t addend2)
 
 /**
  * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
- *
  * @minuend: value to subtract from
  * @subtrahend: value to subtract from @minuend
  *
@@ -192,7 +190,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
 
 /**
  * array_size() - Calculate size of 2-dimensional array.
- *
  * @a: dimension one
  * @b: dimension two
  *
@@ -205,7 +202,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
 
 /**
  * array3_size() - Calculate size of 3-dimensional array.
- *
  * @a: dimension one
  * @b: dimension two
  * @c: dimension three
@@ -220,7 +216,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
 /**
  * flex_array_size() - Calculate size of a flexible array member
  *                     within an enclosing structure.
- *
  * @p: Pointer to the structure.
  * @member: Name of the flexible array member.
  * @count: Number of elements in the array.
@@ -237,7 +232,6 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
 
 /**
  * struct_size() - Calculate size of structure with trailing flexible array.
- *
  * @p: Pointer to the structure.
  * @member: Name of the array member.
  * @count: Number of elements in the array.
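
Illustrative usage of the helpers whose kernel-doc is cleaned up above; struct demo_buf, demo_alloc() and demo_sum() are hypothetical.

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_buf {
        size_t len;
        u32 data[];             /* trailing flexible array member */
};

static struct demo_buf *demo_alloc(size_t count)
{
        struct demo_buf *buf;

        /*
         * struct_size() saturates at SIZE_MAX on overflow, so a bogus
         * @count makes kmalloc() fail instead of under-allocating.
         */
        buf = kmalloc(struct_size(buf, data, count), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->len = count;
        return buf;
}

static int demo_sum(size_t a, size_t b, size_t *res)
{
        /* check_add_overflow() stores the wrapped value and returns true. */
        if (check_add_overflow(a, b, res))
                return -EOVERFLOW;
        return 0;
}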
index 853f64b..0031f7b 100644 (file)
@@ -756,11 +756,14 @@ struct perf_event {
        struct fasync_struct            *fasync;
 
        /* delayed work for NMIs and such */
-       int                             pending_wakeup;
-       int                             pending_kill;
-       int                             pending_disable;
+       unsigned int                    pending_wakeup;
+       unsigned int                    pending_kill;
+       unsigned int                    pending_disable;
+       unsigned int                    pending_sigtrap;
        unsigned long                   pending_addr;   /* SIGTRAP */
-       struct irq_work                 pending;
+       struct irq_work                 pending_irq;
+       struct callback_head            pending_task;
+       unsigned int                    pending_work;
 
        atomic_t                        event_limit;
 
@@ -877,6 +880,14 @@ struct perf_event_context {
 #endif
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
+
+       /*
+        * Sum (event->pending_sigtrap + event->pending_work)
+        *
+        * The SIGTRAP is targeted at ctx->task, as such it won't do changing
+        * that until the signal is delivered.
+        */
+       local_t                         nr_pending;
 };
 
 /*
index 664dd40..3f01ac8 100644 (file)
@@ -122,6 +122,7 @@ enum phylink_op_type {
  *     (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls")
  * @poll_fixed_state: if true, starts link_poll,
  *                   if MAC link is at %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
  * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
  * @get_fixed_state: callback to execute to determine the fixed link state,
  *                  if MAC link is at %MLO_AN_FIXED mode.
@@ -134,6 +135,7 @@ struct phylink_config {
        enum phylink_op_type type;
        bool legacy_pre_march2020;
        bool poll_fixed_state;
+       bool mac_managed_pm;
        bool ovr_an_inband;
        void (*get_fixed_state)(struct phylink_config *config,
                                struct phylink_link_state *state);
index 2ba044d..8e984d7 100644 (file)
@@ -225,7 +225,7 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
 /**
  * struct spi_controller_mem_ops - SPI memory operations
  * @adjust_op_size: shrink the data xfer of an operation to match controller's
- *                 limitations (can be alignment of max RX/TX size
+ *                 limitations (can be alignment or max RX/TX size
  *                 limitations)
  * @supports_op: check if an operation is supported by the controller
  * @exec_op: execute a SPI memory operation
index f07e699..9df0b9a 100644 (file)
@@ -146,9 +146,9 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 static inline bool vma_can_userfault(struct vm_area_struct *vma,
                                     unsigned long vm_flags)
 {
-       if (vm_flags & VM_UFFD_MINOR)
-               return is_vm_hugetlb_page(vma) || vma_is_shmem(vma);
-
+       if ((vm_flags & VM_UFFD_MINOR) &&
+           (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+               return false;
 #ifndef CONFIG_PTE_MARKER_UFFD_WP
        /*
         * If user requested uffd-wp but not enabled pte markers for
index 2b1737c..bf7613b 100644 (file)
@@ -10,6 +10,7 @@
 #include <uapi/linux/utsname.h>
 
 enum uts_proc {
+       UTS_PROC_ARCH,
        UTS_PROC_OSTYPE,
        UTS_PROC_OSRELEASE,
        UTS_PROC_VERSION,
index 9f47d6a..0b58f8b 100644 (file)
@@ -35,6 +35,7 @@ enum ir_kbd_get_key_fn {
        IR_KBD_GET_KEY_PIXELVIEW,
        IR_KBD_GET_KEY_HAUP,
        IR_KBD_GET_KEY_KNC1,
+       IR_KBD_GET_KEY_GENIATECH,
        IR_KBD_GET_KEY_FUSIONHDTV,
        IR_KBD_GET_KEY_HAUP_XVR,
        IR_KBD_GET_KEY_AVERMEDIA_CARDBUS,
index a10b305..86716ee 100644 (file)
@@ -192,21 +192,6 @@ struct usb_device;
 #define MEDIA_DEV_NOTIFY_POST_LINK_CH  1
 
 /**
- * media_entity_enum_init - Initialise an entity enumeration
- *
- * @ent_enum: Entity enumeration to be initialised
- * @mdev: The related media device
- *
- * Return: zero on success or a negative error code.
- */
-static inline __must_check int media_entity_enum_init(
-       struct media_entity_enum *ent_enum, struct media_device *mdev)
-{
-       return __media_entity_enum_init(ent_enum,
-                                       mdev->entity_internal_idx_max + 1);
-}
-
-/**
  * media_device_init() - Initializes a media device element
  *
  * @mdev:      pointer to struct &media_device
index f16ffe7..28c9de8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/fwnode.h>
 #include <linux/list.h>
 #include <linux/media.h>
+#include <linux/minmax.h>
 #include <linux/types.h>
 
 /* Enums used internally at the media controller to represent graphs */
@@ -99,12 +100,34 @@ struct media_graph {
 /**
  * struct media_pipeline - Media pipeline related information
  *
- * @streaming_count:   Streaming start count - streaming stop count
- * @graph:             Media graph walk during pipeline start / stop
+ * @allocated:         Media pipeline allocated and freed by the framework
+ * @mdev:              The media device the pipeline is part of
+ * @pads:              List of media_pipeline_pad
+ * @start_count:       Media pipeline start - stop count
  */
 struct media_pipeline {
-       int streaming_count;
-       struct media_graph graph;
+       bool allocated;
+       struct media_device *mdev;
+       struct list_head pads;
+       int start_count;
+};
+
+/**
+ * struct media_pipeline_pad - A pad part of a media pipeline
+ *
+ * @list:              Entry in the media_pad pads list
+ * @pipe:              The media_pipeline that the pad is part of
+ * @pad:               The media pad
+ *
+ * This structure associates a pad with a media pipeline. Instances of
+ * media_pipeline_pad are created by media_pipeline_start() when it builds the
+ * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop()
+ * removes the entries from the list and deletes them.
+ */
+struct media_pipeline_pad {
+       struct list_head list;
+       struct media_pipeline *pipe;
+       struct media_pad *pad;
 };
 
 /**
@@ -186,6 +209,8 @@ enum media_pad_signal_type {
  * @flags:     Pad flags, as defined in
  *             :ref:`include/uapi/linux/media.h <media_header>`
  *             (seek for ``MEDIA_PAD_FL_*``)
+ * @pipe:      Pipeline this pad belongs to. Use media_entity_pipeline() to
+ *             access this field.
  */
 struct media_pad {
        struct media_gobj graph_obj;    /* must be first field in struct */
@@ -193,6 +218,12 @@ struct media_pad {
        u16 index;
        enum media_pad_signal_type sig_type;
        unsigned long flags;
+
+       /*
+        * The fields below are private, and should only be accessed via
+        * appropriate functions.
+        */
+       struct media_pipeline *pipe;
 };
 
 /**
@@ -206,6 +237,14 @@ struct media_pad {
  * @link_validate:     Return whether a link is valid from the entity point of
  *                     view. The media_pipeline_start() function
  *                     validates all links by calling this operation. Optional.
+ * @has_pad_interdep:  Return whether two pads inside the entity are
+ *                     interdependent. If two pads are interdependent they are
+ *                     part of the same pipeline and enabling one of the pads
+ *                     means that the other pad will become "locked" and
+ *                     will not allow configuration changes. pad0 and pad1 are
+ *                     guaranteed to not both be sinks or sources.
+ *                     Optional: If the operation isn't implemented all pads
+ *                     will be considered as interdependent.
  *
  * .. note::
  *
@@ -219,6 +258,8 @@ struct media_entity_operations {
                          const struct media_pad *local,
                          const struct media_pad *remote, u32 flags);
        int (*link_validate)(struct media_link *link);
+       bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0,
+                                unsigned int pad1);
 };
 
 /**
@@ -269,7 +310,6 @@ enum media_entity_type {
  * @links:     List of data links.
  * @ops:       Entity operations.
  * @use_count: Use count for the entity.
- * @pipe:      Pipeline this entity belongs to.
  * @info:      Union with devnode information.  Kept just for backward
  *             compatibility.
  * @info.dev:  Contains device major and minor info.
@@ -305,8 +345,6 @@ struct media_entity {
 
        int use_count;
 
-       struct media_pipeline *pipe;
-
        union {
                struct {
                        u32 major;
@@ -316,6 +354,18 @@ struct media_entity {
 };
 
 /**
+ * media_entity_for_each_pad - Iterate on all pads in an entity
+ * @entity: The entity the pads belong to
+ * @iter: The iterator pad
+ *
+ * Iterate on all pads in a media entity.
+ */
+#define media_entity_for_each_pad(entity, iter)                        \
+       for (iter = (entity)->pads;                             \
+            iter < &(entity)->pads[(entity)->num_pads];        \
+            ++iter)
+
+/**
  * struct media_interface - A media interface graph object.
  *
  * @graph_obj:         embedded graph object
@@ -426,15 +476,15 @@ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
 }
 
 /**
- * __media_entity_enum_init - Initialise an entity enumeration
+ * media_entity_enum_init - Initialise an entity enumeration
  *
  * @ent_enum: Entity enumeration to be initialised
- * @idx_max: Maximum number of entities in the enumeration
+ * @mdev: The related media device
  *
- * Return: Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
  */
-__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
-                                         int idx_max);
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+                                       struct media_device *mdev);
 
 /**
  * media_entity_enum_cleanup - Release resources of an entity enumeration
@@ -924,6 +974,18 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity)
 }
 
 /**
+ * media_pad_is_streaming - Test if a pad is part of a streaming pipeline
+ * @pad: The pad
+ *
+ * Return: True if the pad is part of a pipeline started with the
+ * media_pipeline_start() function, false otherwise.
+ */
+static inline bool media_pad_is_streaming(const struct media_pad *pad)
+{
+       return pad->pipe;
+}
+
+/**
  * media_entity_is_streaming - Test if an entity is part of a streaming pipeline
  * @entity: The entity
  *
@@ -932,10 +994,50 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity)
  */
 static inline bool media_entity_is_streaming(const struct media_entity *entity)
 {
-       return entity->pipe;
+       struct media_pad *pad;
+
+       media_entity_for_each_pad(entity, pad) {
+               if (media_pad_is_streaming(pad))
+                       return true;
+       }
+
+       return false;
 }
 
 /**
+ * media_entity_pipeline - Get the media pipeline an entity is part of
+ * @entity: The entity
+ *
+ * DEPRECATED: use media_pad_pipeline() instead.
+ *
+ * This function returns the media pipeline that an entity has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * In general, entities can be part of multiple pipelines, when carrying
+ * multiple streams (either on different pads, or on the same pad using
+ * multiplexed streams). This function is to be used only for entities that
+ * do not support multiple pipelines.
+ *
+ * Return: The media_pipeline the entity is part of, or NULL if the entity is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity);
+
+/**
+ * media_pad_pipeline - Get the media pipeline a pad is part of
+ * @pad: The pad
+ *
+ * This function returns the media pipeline that a pad has been associated
+ * with when constructing the pipeline with media_pipeline_start(). The pointer
+ * remains valid until media_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the pad is part of, or NULL if the pad is
+ * not part of any pipeline.
+ */
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
+
+/**
  * media_entity_get_fwnode_pad - Get pad number from fwnode
  *
  * @entity: The entity
@@ -1013,53 +1115,66 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
 
 /**
  * media_pipeline_start - Mark a pipeline as streaming
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
  *
- * Mark all entities connected to a given entity through enabled links, either
+ * Mark all pads connected to a given pad through enabled links, either
  * directly or indirectly, as streaming. The given pipeline object is assigned
- * to every entity in the pipeline and stored in the media_entity pipe field.
+ * to every pad in the pipeline and stored in the media_pad pipe field.
  *
  * Calls to this function can be nested, in which case the same number of
  * media_pipeline_stop() calls will be required to stop streaming. The
  * pipeline pointer must be identical for all nested calls to
  * media_pipeline_start().
  */
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
                                      struct media_pipeline *pipe);
 /**
  * __media_pipeline_start - Mark a pipeline as streaming
  *
- * @entity: Starting entity
- * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ * @pad: Starting pad
+ * @pipe: Media pipeline to be assigned to all pads in the pipeline.
  *
  * ..note:: This is the non-locking version of media_pipeline_start()
  */
-__must_check int __media_pipeline_start(struct media_entity *entity,
+__must_check int __media_pipeline_start(struct media_pad *pad,
                                        struct media_pipeline *pipe);
 
 /**
  * media_pipeline_stop - Mark a pipeline as not streaming
- * @entity: Starting entity
+ * @pad: Starting pad
  *
- * Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as not streaming. The media_entity pipe field is
+ * Mark all pads connected to a given pad through enabled links, either
+ * directly or indirectly, as not streaming. The media_pad pipe field is
  * reset to %NULL.
  *
  * If multiple calls to media_pipeline_start() have been made, the same
  * number of calls to this function are required to mark the pipeline as not
  * streaming.
  */
-void media_pipeline_stop(struct media_entity *entity);
+void media_pipeline_stop(struct media_pad *pad);
 
 /**
  * __media_pipeline_stop - Mark a pipeline as not streaming
  *
- * @entity: Starting entity
+ * @pad: Starting pad
  *
  * .. note:: This is the non-locking version of media_pipeline_stop()
  */
-void __media_pipeline_stop(struct media_entity *entity);
+void __media_pipeline_stop(struct media_pad *pad);
+
+/**
+ * media_pipeline_alloc_start - Mark a pipeline as streaming
+ * @pad: Starting pad
+ *
+ * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead
+ * of working on a given pipeline the function will use an existing pipeline if
+ * the pad is already part of a pipeline, or allocate a new pipeline.
+ *
+ * Calls to media_pipeline_alloc_start() must be matched with
+ * media_pipeline_stop().
+ */
+__must_check int media_pipeline_alloc_start(struct media_pad *pad);
 
 /**
  * media_devnode_create() - creates and initializes a device node interface
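
Illustrative driver-side sketch of the pad-based pipeline API introduced above; demo_start()/demo_stop() and the omitted hardware programming are placeholders.

#include <media/media-entity.h>

static int demo_start(struct media_pad *pad, struct media_pipeline *pipe)
{
        int ret;

        /* Associate every pad reachable over enabled links with @pipe. */
        ret = media_pipeline_start(pad, pipe);
        if (ret)
                return ret;

        /* ... program the hardware here ... */

        return 0;
}

static void demo_stop(struct media_pad *pad)
{
        /* One stop per successful media_pipeline_start() call. */
        media_pipeline_stop(pad);
}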
index 725ff91..1bdaea2 100644 (file)
@@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
  *
  * @sd: pointer to &struct v4l2_subdev
  * @client: pointer to struct i2c_client
- * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @devname: the name of the device; if NULL, the I²C device driver's name
+ *           will be used
  * @postfix: sub-device specific string to put right after the I²C device name;
  *          may be NULL
  */
index b76a071..e59d9a2 100644 (file)
@@ -121,21 +121,19 @@ struct v4l2_ctrl_ops {
  * struct v4l2_ctrl_type_ops - The control type operations that the driver
  *                            has to provide.
  *
- * @equal: return true if both values are equal.
- * @init: initialize the value.
+ * @equal: return true if all ctrl->elems array elements are equal.
+ * @init: initialize the value for array elements from from_idx to ctrl->elems.
  * @log: log the value.
- * @validate: validate the value. Return 0 on success and a negative value
- *     otherwise.
+ * @validate: validate the value for ctrl->new_elems array elements.
+ *     Return 0 on success and a negative value otherwise.
  */
 struct v4l2_ctrl_type_ops {
-       bool (*equal)(const struct v4l2_ctrl *ctrl, u32 elems,
-                     union v4l2_ctrl_ptr ptr1,
-                     union v4l2_ctrl_ptr ptr2);
-       void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, u32 tot_elems,
+       bool (*equal)(const struct v4l2_ctrl *ctrl,
+                     union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+       void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx,
                     union v4l2_ctrl_ptr ptr);
        void (*log)(const struct v4l2_ctrl *ctrl);
-       int (*validate)(const struct v4l2_ctrl *ctrl, u32 elems,
-                       union v4l2_ctrl_ptr ptr);
+       int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
 };
 
 /**
@@ -1543,13 +1541,12 @@ int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
  * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback.
  *
  * @ctrl: The v4l2_ctrl pointer.
- * @elems: The number of elements to compare.
  * @ptr1: A v4l2 control value.
  * @ptr2: A v4l2 control value.
  *
  * Return: true if values are equal, otherwise false.
  */
-bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
                             union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
 
 /**
@@ -1557,13 +1554,12 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
  *
  * @ctrl: The v4l2_ctrl pointer.
  * @from_idx: Starting element index.
- * @elems: The number of elements to initialize.
  * @ptr: The v4l2 control value.
  *
  * Return: void
  */
 void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
-                           u32 elems, union v4l2_ctrl_ptr ptr);
+                           union v4l2_ctrl_ptr ptr);
 
 /**
  * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback.
@@ -1578,12 +1574,10 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
  * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
  *
  * @ctrl: The v4l2_ctrl pointer.
- * @elems: The number of elements in the control.
  * @ptr: The v4l2 control value.
  *
  * Return: 0 on success, a negative error code on failure.
  */
-int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
-                              union v4l2_ctrl_ptr ptr);
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
 
 #endif
index 5cf1ede..e0a1350 100644 (file)
@@ -539,4 +539,106 @@ static inline int video_is_registered(struct video_device *vdev)
        return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
 }
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+/**
+ * video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as streaming. The given pipeline object is
+ * assigned to every pad in the pipeline and stored in the media_pad pipe
+ * field.
+ *
+ * Calls to this function can be nested, in which case the same number of
+ * video_device_pipeline_stop() calls will be required to stop streaming. The
+ * pipeline pointer must be identical for all nested calls to
+ * video_device_pipeline_start().
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_start().
+ */
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+                                            struct media_pipeline *pipe);
+
+/**
+ * __video_device_pipeline_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ * @pipe: Media pipeline to be assigned to all entities in the pipeline.
+ *
+ * .. note:: This is the non-locking version of video_device_pipeline_start()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_start().
+ */
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+                                              struct media_pipeline *pipe);
+
+/**
+ * video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * Mark all entities connected to a given video device through enabled links,
+ * either directly or indirectly, as not streaming. The media_pad pipe field
+ * is reset to %NULL.
+ *
+ * If multiple calls to media_pipeline_start() have been made, the same
+ * number of calls to this function are required to mark the pipeline as not
+ * streaming.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_pipeline_stop().
+ */
+void video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * __video_device_pipeline_stop - Mark a pipeline as not streaming
+ * @vdev: Starting video device
+ *
+ * .. note:: This is the non-locking version of media_pipeline_stop()
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around __media_pipeline_stop().
+ */
+void __video_device_pipeline_stop(struct video_device *vdev);
+
+/**
+ * video_device_pipeline_alloc_start - Mark a pipeline as streaming
+ * @vdev: Starting video device
+ *
+ * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start()
+ * but instead of working on a given pipeline the function will use an
+ * existing pipeline if the video device is already part of a pipeline, or
+ * allocate a new pipeline.
+ *
+ * Calls to video_device_pipeline_alloc_start() must be matched with
+ * video_device_pipeline_stop().
+ */
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev);
+
+/**
+ * video_device_pipeline - Get the media pipeline a video device is part of
+ * @vdev: The video device
+ *
+ * This function returns the media pipeline that a video device has been
+ * associated with when constructing the pipeline with
+ * video_device_pipeline_start(). The pointer remains valid until
+ * video_device_pipeline_stop() is called.
+ *
+ * Return: The media_pipeline the video device is part of, or NULL if the video
+ * device is not part of any pipeline.
+ *
+ * The video device must contain a single pad.
+ *
+ * This is a convenience wrapper around media_entity_pipeline().
+ */
+struct media_pipeline *video_device_pipeline(struct video_device *vdev);
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
 #endif /* _V4L2_DEV_H */
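
Illustrative sketch of the new video-device convenience wrappers in a hypothetical start/stop-streaming path; the demo_*() helpers are not part of the patch.

#include <media/v4l2-dev.h>

static int demo_start_streaming(struct video_device *vdev,
                                struct media_pipeline *pipe)
{
        /* Resolves the video device's single pad and starts the pipeline. */
        return video_device_pipeline_start(vdev, pipe);
}

static void demo_stop_streaming(struct video_device *vdev)
{
        /* One stop per successful (alloc_)start call. */
        video_device_pipeline_stop(vdev);
}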
index 15e4ab6..394d798 100644 (file)
@@ -45,10 +45,6 @@ struct v4l2_async_subdev;
  */
 struct v4l2_fwnode_endpoint {
        struct fwnode_endpoint base;
-       /*
-        * Fields below this line will be zeroed by
-        * v4l2_fwnode_endpoint_parse()
-        */
        enum v4l2_mbus_type bus_type;
        struct {
                struct v4l2_mbus_config_parallel parallel;
index 9689f38..2f80c9c 100644 (file)
@@ -358,7 +358,11 @@ struct v4l2_mbus_frame_desc_entry {
        } bus;
 };
 
-#define V4L2_FRAME_DESC_ENTRY_MAX      4
+ /*
+  * If this number is too small, it should be dropped altogether and the
+  * API switched to a dynamic number of frame descriptor entries.
+  */
+#define V4L2_FRAME_DESC_ENTRY_MAX      8
 
 /**
  * enum v4l2_mbus_frame_desc_type - media bus frame description type
@@ -1046,6 +1050,8 @@ v4l2_subdev_get_pad_format(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *state,
                           unsigned int pad)
 {
+       if (WARN_ON(!state))
+               return NULL;
        if (WARN_ON(pad >= sd->entity.num_pads))
                pad = 0;
        return &state->pads[pad].try_fmt;
@@ -1064,6 +1070,8 @@ v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd,
                         struct v4l2_subdev_state *state,
                         unsigned int pad)
 {
+       if (WARN_ON(!state))
+               return NULL;
        if (WARN_ON(pad >= sd->entity.num_pads))
                pad = 0;
        return &state->pads[pad].try_crop;
@@ -1082,6 +1090,8 @@ v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd,
                            struct v4l2_subdev_state *state,
                            unsigned int pad)
 {
+       if (WARN_ON(!state))
+               return NULL;
        if (WARN_ON(pad >= sd->entity.num_pads))
                pad = 0;
        return &state->pads[pad].try_compose;
index 8f78017..9f97f73 100644 (file)
@@ -37,16 +37,25 @@ struct genl_info;
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
+ * @module: pointer to the owning module (set to THIS_MODULE)
  * @mcgrps: multicast groups used by this family
  * @n_mcgrps: number of multicast groups
  * @resv_start_op: first operation for which reserved fields of the header
- *     can be validated, new families should leave this field at zero
+ *     can be validated and policies are required (see below);
+ *     new families should leave this field at zero
  * @mcgrp_offset: starting number of multicast group IDs in this family
  *     (private)
  * @ops: the operations supported by this family
  * @n_ops: number of operations supported by this family
  * @small_ops: the small-struct operations supported by this family
  * @n_small_ops: number of small-struct operations supported by this family
+ *
+ * Attribute policies (the combination of @policy and @maxattr fields)
+ * can be attached at the family level or at the operation level.
+ * If both are present the per-operation policy takes precedence.
+ * For operations before @resv_start_op, lack of a policy means that the core
+ * will perform no attribute parsing or validation. For newer operations,
+ * if a policy is not provided, the core will reject all TLV attributes.
  */
 struct genl_family {
        int                     id;             /* private */
@@ -173,9 +182,9 @@ struct genl_ops {
 };
 
 /**
- * struct genl_info - info that is available during dumpit op call
+ * struct genl_dumpit_info - info that is available during dumpit op call
  * @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
+ * @op: generic netlink ops - for internal genl code usage
  * @attrs: netlink attributes
  */
 struct genl_dumpit_info {
@@ -354,6 +363,7 @@ int genlmsg_multicast_allns(const struct genl_family *family,
 
 /**
  * genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
  * @skb: netlink message as socket buffer
  * @portid: netlink portid of the destination socket
  */
@@ -373,7 +383,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * gennlmsg_data - head of message payload
+ * genlmsg_data - head of message payload
  * @gnlh: genetlink message header
  */
 static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
index 9e464f6..22f8bab 100644 (file)
@@ -2585,7 +2585,7 @@ static inline gfp_t gfp_any(void)
 
 static inline gfp_t gfp_memcg_charge(void)
 {
-       return in_softirq() ? GFP_NOWAIT : GFP_KERNEL;
+       return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
index 473b0b0..efc9085 100644 (file)
@@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
 {
        struct sock_reuseport *reuse;
        bool ret = false;
 
        rcu_read_lock();
        reuse = rcu_dereference(sk->sk_reuseport_cb);
-       if (reuse) {
-               if (set)
-                       reuse->has_conns = 1;
-               ret = reuse->has_conns;
-       }
+       if (reuse && reuse->has_conns)
+               ret = true;
        rcu_read_unlock();
 
        return ret;
 }
 
+void reuseport_has_conns_set(struct sock *sk);
+
 #endif  /* _SOCK_REUSEPORT_H */
index eae443b..cc3dcc6 100644 (file)
@@ -138,6 +138,7 @@ int snd_ctl_remove(struct snd_card * card, struct snd_kcontrol * kcontrol);
 int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace);
 int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
 int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
+void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
 int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
 struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
 struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
index a0b827f..25e049f 100644 (file)
@@ -177,6 +177,7 @@ void asoc_simple_convert_fixup(struct asoc_simple_data *data,
                                      struct snd_pcm_hw_params *params);
 void asoc_simple_parse_convert(struct device_node *np, char *prefix,
                               struct asoc_simple_data *data);
+bool asoc_simple_is_convert_required(const struct asoc_simple_data *data);
 
 int asoc_simple_parse_routing(struct snd_soc_card *card,
                                      char *prefix);
diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h
new file mode 100644 (file)
index 0000000..beb9bb3
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM watchdog
+
+#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WATCHDOG_H
+
+#include <linux/watchdog.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(watchdog_template,
+
+       TP_PROTO(struct watchdog_device *wdd, int err),
+
+       TP_ARGS(wdd, err),
+
+       TP_STRUCT__entry(
+               __field(int, id)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __entry->id = wdd->id;
+               __entry->err = err;
+       ),
+
+       TP_printk("watchdog%d err=%d", __entry->id, __entry->err)
+);
+
+DEFINE_EVENT(watchdog_template, watchdog_start,
+       TP_PROTO(struct watchdog_device *wdd, int err),
+       TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_ping,
+       TP_PROTO(struct watchdog_device *wdd, int err),
+       TP_ARGS(wdd, err));
+
+DEFINE_EVENT(watchdog_template, watchdog_stop,
+       TP_PROTO(struct watchdog_device *wdd, int err),
+       TP_ARGS(wdd, err));
+
+TRACE_EVENT(watchdog_set_timeout,
+
+       TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err),
+
+       TP_ARGS(wdd, timeout, err),
+
+       TP_STRUCT__entry(
+               __field(int, id)
+               __field(unsigned int, timeout)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __entry->id = wdd->id;
+               __entry->timeout = timeout;
+               __entry->err = err;
+       ),
+
+       TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err)
+);
+
+#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
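
Illustrative sketch (not from this merge) of how the watchdog core would emit the tracepoints declared above; demo_start() is hypothetical, but the trace_watchdog_start() symbol is generated by the DEFINE_EVENT() above.

#define CREATE_TRACE_POINTS
#include <trace/events/watchdog.h>

static int demo_start(struct watchdog_device *wdd)
{
        int err = wdd->ops->start(wdd);

        /* watchdog_template events record the device id and the result. */
        trace_watchdog_start(wdd, err);
        return err;
}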
index 7ee65c0..0d93ec1 100644 (file)
@@ -763,6 +763,8 @@ struct drm_amdgpu_cs_chunk_data {
        #define AMDGPU_INFO_FW_MES_KIQ          0x19
        /* Subquery id: Query MES firmware version */
        #define AMDGPU_INFO_FW_MES              0x1a
+       /* Subquery id: Query IMU firmware version */
+       #define AMDGPU_INFO_FW_IMU              0x1b
 
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED            0x0f
index eac8731..6f93c91 100644 (file)
@@ -235,25 +235,29 @@ struct drm_panfrost_madvise {
 #define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
 #define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
 
+/*
+ * This structure is the native endianness of the dumping machine, tools can
+ * detect the endianness by looking at the value in 'magic'.
+ */
 struct panfrost_dump_object_header {
-       __le32 magic;
-       __le32 type;
-       __le32 file_size;
-       __le32 file_offset;
+       __u32 magic;
+       __u32 type;
+       __u32 file_size;
+       __u32 file_offset;
 
        union {
-               struct pan_reg_hdr {
-                       __le64 jc;
-                       __le32 gpu_id;
-                       __le32 major;
-                       __le32 minor;
-                       __le64 nbos;
+               struct {
+                       __u64 jc;
+                       __u32 gpu_id;
+                       __u32 major;
+                       __u32 minor;
+                       __u64 nbos;
                } reghdr;
 
                struct pan_bomap_hdr {
-                       __le32 valid;
-                       __le64 iova;
-                       __le32 data[2];
+                       __u32 valid;
+                       __u64 iova;
+                       __u32 data[2];
                } bomap;
 
                /*
@@ -261,14 +265,14 @@ struct panfrost_dump_object_header {
                 * with new fields and also keep it 512-byte aligned
                 */
 
-               __le32 sizer[496];
+               __u32 sizer[496];
        };
 };
 
 /* Registers object, an array of these */
 struct panfrost_dump_registers {
-       __le32 reg;
-       __le32 value;
+       __u32 reg;
+       __u32 value;
 };
 
 #if defined(__cplusplus)
index c3baaea..d58fa1c 100644 (file)
@@ -1568,6 +1568,20 @@ static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *
        }
 }
 
+static inline void cec_msg_set_audio_volume_level(struct cec_msg *msg,
+                                                 __u8 audio_volume_level)
+{
+       msg->len = 3;
+       msg->msg[1] = CEC_MSG_SET_AUDIO_VOLUME_LEVEL;
+       msg->msg[2] = audio_volume_level;
+}
+
+static inline void cec_ops_set_audio_volume_level(const struct cec_msg *msg,
+                                                 __u8 *audio_volume_level)
+{
+       *audio_volume_level = msg->msg[2];
+}
+
 
 /* Audio Rate Control Feature */
 static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
index 1d48da9..b8e071a 100644 (file)
@@ -768,6 +768,7 @@ struct cec_event {
 #define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE             0x08
 #define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX                        0x04
 #define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX              0x02
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL     0x01
 
 #define CEC_MSG_GIVE_FEATURES                          0xa5    /* HDMI 2.0 */
 
@@ -1059,6 +1060,7 @@ struct cec_event {
 #define CEC_OP_AUD_FMT_ID_CEA861                       0
 #define CEC_OP_AUD_FMT_ID_CEA861_CXT                   1
 
+#define CEC_MSG_SET_AUDIO_VOLUME_LEVEL                 0x73
 
 /* Audio Rate Control Feature */
 #define CEC_MSG_SET_AUDIO_RATE                         0x9a
index 85be78e..ccb7f5d 100644 (file)
@@ -1337,7 +1337,7 @@ union perf_mem_data_src {
 #define PERF_MEM_LVLNUM_L3     0x03 /* L3 */
 #define PERF_MEM_LVLNUM_L4     0x04 /* L4 */
 /* 5-0x8 available */
-#define PERF_MEM_LVLNUM_EXTN_MEM 0x09 /* Extension memory */
+#define PERF_MEM_LVLNUM_CXL    0x09 /* CXL */
 #define PERF_MEM_LVLNUM_IO     0x0a /* I/O */
 #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
 #define PERF_MEM_LVLNUM_LFB    0x0c /* LFB */
index 583ca0d..730673e 100644 (file)
 /*
  * Defect Pixel Cluster Correction
  */
-#define RKISP1_CIF_ISP_DPCC_METHODS_MAX       3
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX                                3
+
+#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE                 (1U << 2)
+
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER   (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER  (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3           (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3          (1U << 3)
+
+/* 0-2 for sets 1-3 */
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n)          ((n) << 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET         (1U << 3)
+
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE                (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE                (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE                (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE       (1U << 3)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE                (1U << 4)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE     (1U << 8)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE     (1U << 9)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE     (1U << 10)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE    (1U << 11)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE     (1U << 12)
+
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v)                   ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v)                  ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v)                  ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v)                 ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v)                                ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v)                       ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v)                    ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v)                   ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v)                                ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v)                       ((v) << 8)
+
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v)                        ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v)               ((v) << ((n) * 4 + 2))
+
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v)                 ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v)                        ((v) << ((n) * 4 + 2))
 
 /*
  * Denoising pre filter
@@ -249,16 +288,20 @@ struct rkisp1_cif_isp_bls_config {
 };
 
 /**
- * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
+ * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
  *
- * Methods Configuration used by Defect Pixel Cluster Correction
+ * This structure stores the configuration of one set of methods for the DPCC
+ * algorithm. Multiple methods can be selected in each set (independently for
+ * the Green and Red/Blue components) through the @method field; the result is
+ * the logical AND of all enabled methods. The remaining fields set thresholds
+ * and factors for each method.
  *
- * @method: Method enable bits
- * @line_thresh: Line threshold
- * @line_mad_fac: Line MAD factor
- * @pg_fac: Peak gradient factor
- * @rnd_thresh: Rank Neighbor Difference threshold
- * @rg_fac: Rank gradient factor
+ * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
+ * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
+ * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
+ * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
+ * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
+ * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
  */
 struct rkisp1_cif_isp_dpcc_methods_config {
        __u32 method;
@@ -272,14 +315,16 @@ struct rkisp1_cif_isp_dpcc_methods_config {
 /**
  * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
  *
- * Configuration used by Defect Pixel Cluster Correction
+ * Configuration used by Defect Pixel Cluster Correction. Three sets of methods
+ * can be configured and selected through the @set_use field. The result is the
+ * logical OR of all enabled sets.
  *
- * @mode: dpcc output mode
- * @output_mode: whether use hard coded methods
- * @set_use: stage1 methods set
- * @methods: methods config
- * @ro_limits: rank order limits
- * @rnd_offs: differential rank offsets for rank neighbor difference
+ * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
+ * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
+ * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
+ * @methods: Methods sets configuration
+ * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
+ * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
  */
 struct rkisp1_cif_isp_dpcc_config {
        __u32 mode;
index 86cae23..29da1f4 100644 (file)
@@ -1601,7 +1601,8 @@ struct v4l2_bt_timings {
        ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
 #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
        ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
-        (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+        ((bt)->interlaced ? \
+         ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
 #define V4L2_DV_BT_FRAME_HEIGHT(bt) \
        ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
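
A quick illustration (not part of the patch): with the interlaced check, the il_* vertical blanking fields only count towards the frame height for interlaced timings. The timing values below are arbitrary examples.

	#include <linux/videodev2.h>

	/* Hedged sketch: frame height with and without the il_* field blanking. */
	static void frame_height_example(void)
	{
		struct v4l2_bt_timings bt = {
			.height = 1080, .interlaced = 1,
			.vfrontporch = 4, .vsync = 5, .vbackporch = 36,
			.il_vfrontporch = 4, .il_vsync = 5, .il_vbackporch = 37,
		};
		unsigned int frame;

		frame = V4L2_DV_BT_FRAME_HEIGHT(&bt);	/* 1080 + 45 + 46 = 1171 */
		bt.interlaced = 0;
		frame = V4L2_DV_BT_FRAME_HEIGHT(&bt);	/* 1080 + 45 = 1125: il_* no longer counted */
		(void)frame;
	}
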
 
index 694f7c1..abf6509 100644 (file)
@@ -66,7 +66,7 @@ config RUST_IS_AVAILABLE
          This shows whether a suitable Rust toolchain is available (found).
 
          Please see Documentation/rust/quick-start.rst for instructions on how
-         to satify the build requirements of Rust support.
+         to satisfy the build requirements of Rust support.
 
          In particular, the Makefile target 'rustavailable' is useful to check
          why the Rust toolchain is not being detected.
index ff3a712..351111f 100644 (file)
@@ -5,22 +5,9 @@
 #include <linux/file.h>
 #include <linux/io_uring_types.h>
 
-/*
- * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0
- * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we
- * can't safely always dereference the file when the task has exited and ring
- * cleanup is done. If a file is tracked and part of SCM, then unix gc on
- * process exit may reap it before __io_sqe_files_unregister() is run.
- */
 #define FFS_NOWAIT             0x1UL
 #define FFS_ISREG              0x2UL
-#if defined(CONFIG_64BIT)
-#define FFS_SCM                        0x4UL
-#else
-#define IO_URING_SCM_ALL
-#define FFS_SCM                        0x0UL
-#endif
-#define FFS_MASK               ~(FFS_NOWAIT|FFS_ISREG|FFS_SCM)
+#define FFS_MASK               ~(FFS_NOWAIT|FFS_ISREG)
 
 bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
 void io_free_file_tables(struct io_file_table *table);
@@ -38,6 +25,7 @@ unsigned int io_file_get_flags(struct file *file);
 
 static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
 {
+       WARN_ON_ONCE(!test_bit(bit, table->bitmap));
        __clear_bit(bit, table->bitmap);
        table->alloc_hint = bit;
 }
index c6536d4..6f1d0e5 100644 (file)
@@ -1164,10 +1164,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
                if (!wqe)
                        goto err;
+               wq->wqes[node] = wqe;
                if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
                        goto err;
                cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
-               wq->wqes[node] = wqe;
                wqe->node = alloc_node;
                wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
                wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
index de08d99..ac8c488 100644 (file)
@@ -1173,7 +1173,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
        }
 }
 
-int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
+int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
 {
        struct llist_node *node;
        struct llist_node fake;
@@ -1192,7 +1192,7 @@ again:
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
                prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-               req->io_task_work.func(req, &locked);
+               req->io_task_work.func(req, locked);
                ret++;
                node = next;
        }
@@ -1208,7 +1208,7 @@ again:
                goto again;
        }
 
-       if (locked)
+       if (*locked)
                io_submit_flush_completions(ctx);
        trace_io_uring_local_work_run(ctx, ret, loops);
        return ret;
@@ -1225,7 +1225,7 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 
        __set_current_state(TASK_RUNNING);
        locked = mutex_trylock(&ctx->uring_lock);
-       ret = __io_run_local_work(ctx, locked);
+       ret = __io_run_local_work(ctx, &locked);
        if (locked)
                mutex_unlock(&ctx->uring_lock);
 
@@ -1446,8 +1446,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                    io_task_work_pending(ctx)) {
                        u32 tail = ctx->cached_cq_tail;
 
-                       if (!llist_empty(&ctx->work_llist))
-                               __io_run_local_work(ctx, true);
+                       (void) io_run_local_work_locked(ctx);
 
                        if (task_work_pending(current) ||
                            wq_list_empty(&ctx->iopoll_list)) {
@@ -1587,8 +1586,6 @@ unsigned int io_file_get_flags(struct file *file)
                res |= FFS_ISREG;
        if (__io_file_supports_nowait(file, mode))
                res |= FFS_NOWAIT;
-       if (io_file_need_scm(file))
-               res |= FFS_SCM;
        return res;
 }
 
@@ -1860,7 +1857,6 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
        /* mask in overlapping REQ_F and FFS bits */
        req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
        io_req_set_rsrc_node(req, ctx, 0);
-       WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap));
 out:
        io_ring_submit_unlock(ctx, issue_flags);
        return file;
@@ -2563,18 +2559,14 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-       struct io_submit_state *state = &ctx->submit_state;
        int nr = 0;
 
        mutex_lock(&ctx->uring_lock);
-       io_flush_cached_locked_reqs(ctx, state);
+       io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
 
        while (!io_req_cache_empty(ctx)) {
-               struct io_wq_work_node *node;
-               struct io_kiocb *req;
+               struct io_kiocb *req = io_alloc_req(ctx);
 
-               node = wq_stack_extract(&state->free_list);
-               req = container_of(node, struct io_kiocb, comp_list);
                kmem_cache_free(req_cachep, req);
                nr++;
        }
@@ -2811,15 +2803,12 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
                io_poll_remove_all(ctx, NULL, true);
        mutex_unlock(&ctx->uring_lock);
 
-       /* failed during ring init, it couldn't have issued any requests */
-       if (ctx->rings) {
+       /*
+        * If we failed setting up the ctx, we might not have any rings
+        * and therefore did not submit any requests
+        */
+       if (ctx->rings)
                io_kill_timeouts(ctx, NULL, true);
-               /* if we failed setting up the ctx, we might not have any rings */
-               io_iopoll_try_reap_events(ctx);
-               /* drop cached put refs after potentially doing completions */
-               if (current->io_uring)
-                       io_uring_drop_tctx_refs(current);
-       }
 
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        /*
index ef77d2a..e99a79f 100644 (file)
@@ -27,7 +27,7 @@ enum {
 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
 bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
-int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
 void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
@@ -277,9 +277,18 @@ static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 
 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
 {
+       bool locked;
+       int ret;
+
        if (llist_empty(&ctx->work_llist))
                return 0;
-       return __io_run_local_work(ctx, true);
+
+       locked = true;
+       ret = __io_run_local_work(ctx, &locked);
+       /* shouldn't happen! */
+       if (WARN_ON_ONCE(!locked))
+               mutex_lock(&ctx->uring_lock);
+       return ret;
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
index 4a7e5d0..90d2fc6 100644 (file)
@@ -95,6 +95,9 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 
        msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
        file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
+       if (!file_ptr)
+               goto out_unlock;
+
        src_file = (struct file *) (file_ptr & FFS_MASK);
        get_file(src_file);
 
index 8c7226b..15dea91 100644 (file)
@@ -1056,6 +1056,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;
+       if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
+               return -EOPNOTSUPP;
 
        msg.msg_name = NULL;
        msg.msg_control = NULL;
@@ -1151,6 +1153,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;
+       if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
+               return -EOPNOTSUPP;
 
        if (req_has_async_data(req)) {
                kmsg = req->async_data;
index 012fdb0..55d4ab9 100644 (file)
@@ -757,20 +757,17 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
 
 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
-#if !defined(IO_URING_SCM_ALL)
        int i;
 
        for (i = 0; i < ctx->nr_user_files; i++) {
                struct file *file = io_file_from_index(&ctx->file_table, i);
 
-               if (!file)
-                       continue;
-               if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM)
+               /* skip scm accounted files, they'll be freed by ->ring_sock */
+               if (!file || io_file_need_scm(file))
                        continue;
                io_file_bitmap_clear(&ctx->file_table, i);
                fput(file);
        }
-#endif
 
 #if defined(CONFIG_UNIX)
        if (ctx->ring_sock) {
index 9bce156..81445a4 100644 (file)
@@ -82,11 +82,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
 #if defined(CONFIG_UNIX)
 static inline bool io_file_need_scm(struct file *filp)
 {
-#if defined(IO_URING_SCM_ALL)
-       return true;
-#else
        return !!unix_get_socket(filp);
-#endif
 }
 #else
 static inline bool io_file_need_scm(struct file *filp)
index 100de26..bb47cc4 100644 (file)
@@ -242,8 +242,6 @@ static void io_req_io_end(struct io_kiocb *req)
 {
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
-       WARN_ON(!in_task());
-
        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                kiocb_end_write(req);
                fsnotify_modify(req->file);
index e4e0990..fd08b3c 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -1329,11 +1329,11 @@ fail_msg_bytes:
 #ifdef CONFIG_IPC_NS
 void msg_exit_ns(struct ipc_namespace *ns)
 {
-       percpu_counter_destroy(&ns->percpu_msg_bytes);
-       percpu_counter_destroy(&ns->percpu_msg_hdrs);
        free_ipcs(ns, &msg_ids(ns), freeque);
        idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_MSG_IDS].key_ht);
+       percpu_counter_destroy(&ns->percpu_msg_bytes);
+       percpu_counter_destroy(&ns->percpu_msg_hdrs);
 }
 #endif
 
index eba603c..35c07af 100644 (file)
@@ -4436,6 +4436,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               if (btf_type_is_resolve_source_only(ret_type)) {
+                       btf_verifier_log_type(env, t, "Invalid return type");
+                       return -EINVAL;
+               }
+
                if (btf_type_needs_resolve(ret_type) &&
                    !env_type_is_resolved(env, ret_type_id)) {
                        err = btf_resolve(env, ret_type, ret_type_id);
index 0d200a9..9fcf09f 100644 (file)
@@ -196,7 +196,7 @@ static int bpf_iter_attach_cgroup(struct bpf_prog *prog,
                return -EINVAL;
 
        if (fd)
-               cgrp = cgroup_get_from_fd(fd);
+               cgrp = cgroup_v1v2_get_from_fd(fd);
        else if (id)
                cgrp = cgroup_get_from_id(id);
        else /* walk the entire hierarchy by default. */
index fa64b80..04f0a04 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/init.h>
 
 /* The BPF dispatcher is a multiway branch code generator. The
  * dispatcher is a mechanism to avoid the performance penalty of an
@@ -90,6 +91,11 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
        return -ENOTSUPP;
 }
 
+int __weak __init bpf_arch_init_dispatcher_early(void *ip)
+{
+       return -ENOTSUPP;
+}
+
 static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
 {
        s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
index 5f83be1..4901fa1 100644 (file)
@@ -418,14 +418,17 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
        /* No progs are using this bpf_mem_cache, but htab_map_free() called
         * bpf_mem_cache_free() for all remaining elements and they can be in
         * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
+        *
+        * Except for the waiting_for_gp list, there are no concurrent operations
+        * on these lists, so it is safe to use __llist_del_all().
         */
        llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
                free_one(c, llnode);
        llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
                free_one(c, llnode);
-       llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist))
+       llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
                free_one(c, llnode);
-       llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
+       llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
                free_one(c, llnode);
 }
 
@@ -493,6 +496,16 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                rcu_in_progress = 0;
                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(ma->cache, cpu);
+                       /*
+                        * refill_work may be unfinished for a PREEMPT_RT kernel,
+                        * in which irq work is invoked in a per-CPU RT thread.
+                        * It is also possible for a kernel with
+                        * arch_irq_work_has_interrupt() being false, where irq
+                        * work is invoked in the timer interrupt. So wait for
+                        * the completion of irq work to ease the handling of
+                        * concurrency.
+                        */
+                       irq_work_sync(&c->refill_work);
                        drain_mem_cache(c);
                        rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                }
@@ -507,6 +520,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                        cc = per_cpu_ptr(ma->caches, cpu);
                        for (i = 0; i < NUM_CACHES; i++) {
                                c = &cc->cache[i];
+                               irq_work_sync(&c->refill_work);
                                drain_mem_cache(c);
                                rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                        }
index 014ee09..7f0a9f6 100644 (file)
@@ -6946,6 +6946,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
        __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 
        callee->in_callback_fn = true;
+       callee->callback_ret_range = tnum_range(0, 1);
        return 0;
 }
 
index 7f48667..2319946 100644 (file)
@@ -1392,6 +1392,9 @@ static void cgroup_destroy_root(struct cgroup_root *root)
        cgroup_free_root(root);
 }
 
+/*
+ * The returned cgroup is not refcounted but is valid as long as the cset pins it.
+ */
 static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
                                            struct cgroup_root *root)
 {
@@ -1403,6 +1406,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
                res_cgroup = cset->dfl_cgrp;
        } else {
                struct cgrp_cset_link *link;
+               lockdep_assert_held(&css_set_lock);
 
                list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                        struct cgroup *c = link->cgrp;
@@ -1414,6 +1418,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
                }
        }
 
+       BUG_ON(!res_cgroup);
        return res_cgroup;
 }
 
@@ -1436,23 +1441,36 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
 
        rcu_read_unlock();
 
-       BUG_ON(!res);
        return res;
 }
 
+/*
+ * Look up cgroup associated with current task's cgroup namespace on the default
+ * hierarchy.
+ *
+ * Unlike current_cgns_cgroup_from_root(), this doesn't need locks:
+ * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu
+ *   pointers.
+ * - css_set_lock is not needed because we just read cset->dfl_cgrp.
+ * - As a bonus, the returned cgrp is pinned by the current task because it
+ *   cannot switch cgroup_ns asynchronously.
+ */
+static struct cgroup *current_cgns_cgroup_dfl(void)
+{
+       struct css_set *cset;
+
+       cset = current->nsproxy->cgroup_ns->root_cset;
+       return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
                                            struct cgroup_root *root)
 {
-       struct cgroup *res = NULL;
-
        lockdep_assert_held(&cgroup_mutex);
        lockdep_assert_held(&css_set_lock);
 
-       res = __cset_cgroup_from_root(cset, root);
-
-       BUG_ON(!res);
-       return res;
+       return __cset_cgroup_from_root(cset, root);
 }
 
 /*
@@ -6191,9 +6209,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
        if (!cgrp)
                return ERR_PTR(-ENOENT);
 
-       spin_lock_irq(&css_set_lock);
-       root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root);
-       spin_unlock_irq(&css_set_lock);
+       root_cgrp = current_cgns_cgroup_dfl();
        if (!cgroup_is_descendant(cgrp, root_cgrp)) {
                cgroup_put(cgrp);
                return ERR_PTR(-ENOENT);
@@ -6294,16 +6310,42 @@ void cgroup_fork(struct task_struct *child)
        INIT_LIST_HEAD(&child->cg_list);
 }
 
-static struct cgroup *cgroup_get_from_file(struct file *f)
+/**
+ * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer
+ * @f: file corresponding to cgroup_dir
+ *
+ * Find the cgroup from a file pointer associated with a cgroup directory.
+ * Returns a pointer to the cgroup on success. ERR_PTR is returned if the
+ * cgroup cannot be found.
+ */
+static struct cgroup *cgroup_v1v2_get_from_file(struct file *f)
 {
        struct cgroup_subsys_state *css;
-       struct cgroup *cgrp;
 
        css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
        if (IS_ERR(css))
                return ERR_CAST(css);
 
-       cgrp = css->cgroup;
+       return css->cgroup;
+}
+
+/**
+ * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports
+ * cgroup2.
+ * @f: file corresponding to cgroup2_dir
+ */
+static struct cgroup *cgroup_get_from_file(struct file *f)
+{
+       struct cgroup *cgrp = cgroup_v1v2_get_from_file(f);
+
+       if (IS_ERR(cgrp))
+               return ERR_CAST(cgrp);
+
+       if (!cgroup_on_dfl(cgrp)) {
+               cgroup_put(cgrp);
+               return ERR_PTR(-EBADF);
+       }
+
        return cgrp;
 }
 
@@ -6772,10 +6814,8 @@ struct cgroup *cgroup_get_from_path(const char *path)
        struct cgroup *cgrp = ERR_PTR(-ENOENT);
        struct cgroup *root_cgrp;
 
-       spin_lock_irq(&css_set_lock);
-       root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root);
+       root_cgrp = current_cgns_cgroup_dfl();
        kn = kernfs_walk_and_get(root_cgrp->kn, path);
-       spin_unlock_irq(&css_set_lock);
        if (!kn)
                goto out;
 
@@ -6800,15 +6840,15 @@ out:
 EXPORT_SYMBOL_GPL(cgroup_get_from_path);
 
 /**
- * cgroup_get_from_fd - get a cgroup pointer from a fd
- * @fd: fd obtained by open(cgroup2_dir)
+ * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd
+ * @fd: fd obtained by open(cgroup_dir)
  *
  * Find the cgroup from a fd which should be obtained
  * by opening a cgroup directory.  Returns a pointer to the
  * cgroup on success. ERR_PTR is returned if the cgroup
  * cannot be found.
  */
-struct cgroup *cgroup_get_from_fd(int fd)
+struct cgroup *cgroup_v1v2_get_from_fd(int fd)
 {
        struct cgroup *cgrp;
        struct file *f;
@@ -6817,10 +6857,29 @@ struct cgroup *cgroup_get_from_fd(int fd)
        if (!f)
                return ERR_PTR(-EBADF);
 
-       cgrp = cgroup_get_from_file(f);
+       cgrp = cgroup_v1v2_get_from_file(f);
        fput(f);
        return cgrp;
 }
+
+/**
+ * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports
+ * cgroup2.
+ * @fd: fd obtained by open(cgroup2_dir)
+ */
+struct cgroup *cgroup_get_from_fd(int fd)
+{
+       struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);
+
+       if (IS_ERR(cgrp))
+               return ERR_CAST(cgrp);
+
+       if (!cgroup_on_dfl(cgrp)) {
+               cgroup_put(cgrp);
+               return ERR_PTR(-EBADF);
+       }
+       return cgrp;
+}
 EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
 
 static u64 power_of_ten(int power)
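
As an aside, a minimal in-kernel caller-side sketch (not from the patch) mirroring the bpf cgroup_iter change earlier in this pull: the fd is assumed to refer to an already-open cgroup directory on either hierarchy, and the helper name use_cgroup_fd is hypothetical.

	/* Hedged sketch: resolve a cgroup from a directory fd on either v1 or v2. */
	static int use_cgroup_fd(int fd)
	{
		struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);

		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		/* ... operate on cgrp ... */

		cgroup_put(cgrp);	/* drop the reference taken by the lookup */
		return 0;
	}
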
index aefc1e0..4ec3717 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/highmem.h>
 #include <linux/pgtable.h>
 #include <linux/buildid.h>
+#include <linux/task_work.h>
 
 #include "internal.h"
 
@@ -2276,11 +2277,26 @@ event_sched_out(struct perf_event *event,
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
-       if (READ_ONCE(event->pending_disable) >= 0) {
-               WRITE_ONCE(event->pending_disable, -1);
+       if (event->pending_disable) {
+               event->pending_disable = 0;
                perf_cgroup_event_disable(event, ctx);
                state = PERF_EVENT_STATE_OFF;
        }
+
+       if (event->pending_sigtrap) {
+               bool dec = true;
+
+               event->pending_sigtrap = 0;
+               if (state != PERF_EVENT_STATE_OFF &&
+                   !event->pending_work) {
+                       event->pending_work = 1;
+                       dec = false;
+                       task_work_add(current, &event->pending_task, TWA_RESUME);
+               }
+               if (dec)
+                       local_dec(&event->ctx->nr_pending);
+       }
+
        perf_event_set_state(event, state);
 
        if (!is_software_event(event))
@@ -2432,7 +2448,7 @@ static void __perf_event_disable(struct perf_event *event,
  * hold the top-level event's child_mutex, so any descendant that
  * goes to exit will block in perf_event_exit_event().
  *
- * When called from perf_pending_event it's OK because event->ctx
+ * When called from perf_pending_irq it's OK because event->ctx
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
@@ -2471,9 +2487,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-       WRITE_ONCE(event->pending_disable, smp_processor_id());
-       /* can fail, see perf_pending_event_disable() */
-       irq_work_queue(&event->pending);
+       event->pending_disable = 1;
+       irq_work_queue(&event->pending_irq);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -3428,11 +3443,23 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
                raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
                if (context_equiv(ctx, next_ctx)) {
 
+                       perf_pmu_disable(pmu);
+
+                       /* PMIs are disabled; ctx->nr_pending is stable. */
+                       if (local_read(&ctx->nr_pending) ||
+                           local_read(&next_ctx->nr_pending)) {
+                               /*
+                                * Must not swap out ctx when there's pending
+                                * Must not swap out ctx when there are pending
+                                * events that rely on the ctx->task relation.
+                               raw_spin_unlock(&next_ctx->lock);
+                               rcu_read_unlock();
+                               goto inside_switch;
+                       }
+
                        WRITE_ONCE(ctx->task, next);
                        WRITE_ONCE(next_ctx->task, task);
 
-                       perf_pmu_disable(pmu);
-
                        if (cpuctx->sched_cb_usage && pmu->sched_task)
                                pmu->sched_task(ctx, false);
 
@@ -3473,6 +3500,7 @@ unlock:
                raw_spin_lock(&ctx->lock);
                perf_pmu_disable(pmu);
 
+inside_switch:
                if (cpuctx->sched_cb_usage && pmu->sched_task)
                        pmu->sched_task(ctx, false);
                task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
@@ -4939,7 +4967,7 @@ static void perf_addr_filters_splice(struct perf_event *event,
 
 static void _free_event(struct perf_event *event)
 {
-       irq_work_sync(&event->pending);
+       irq_work_sync(&event->pending_irq);
 
        unaccount_event(event);
 
@@ -6439,7 +6467,8 @@ static void perf_sigtrap(struct perf_event *event)
                return;
 
        /*
-        * perf_pending_event() can race with the task exiting.
+        * Both perf_pending_task() and perf_pending_irq() can race with the
+        * task exiting.
         */
        if (current->flags & PF_EXITING)
                return;
@@ -6448,23 +6477,33 @@ static void perf_sigtrap(struct perf_event *event)
                      event->attr.type, event->attr.sig_data);
 }
 
-static void perf_pending_event_disable(struct perf_event *event)
+/*
+ * Deliver the pending work in-event-context or follow the context.
+ */
+static void __perf_pending_irq(struct perf_event *event)
 {
-       int cpu = READ_ONCE(event->pending_disable);
+       int cpu = READ_ONCE(event->oncpu);
 
+       /*
+        * If the event isn't running, we're done. event_sched_out() will have
+        * taken care of things.
+        */
        if (cpu < 0)
                return;
 
+       /*
+        * Yay, we hit home and are in the context of the event.
+        */
        if (cpu == smp_processor_id()) {
-               WRITE_ONCE(event->pending_disable, -1);
-
-               if (event->attr.sigtrap) {
+               if (event->pending_sigtrap) {
+                       event->pending_sigtrap = 0;
                        perf_sigtrap(event);
-                       atomic_set_release(&event->event_limit, 1); /* rearm event */
-                       return;
+                       local_dec(&event->ctx->nr_pending);
+               }
+               if (event->pending_disable) {
+                       event->pending_disable = 0;
+                       perf_event_disable_local(event);
                }
-
-               perf_event_disable_local(event);
                return;
        }
 
@@ -6484,35 +6523,62 @@ static void perf_pending_event_disable(struct perf_event *event)
         *                                irq_work_queue(); // FAILS
         *
         *  irq_work_run()
-        *    perf_pending_event()
+        *    perf_pending_irq()
         *
         * But the event runs on CPU-B and wants disabling there.
         */
-       irq_work_queue_on(&event->pending, cpu);
+       irq_work_queue_on(&event->pending_irq, cpu);
 }
 
-static void perf_pending_event(struct irq_work *entry)
+static void perf_pending_irq(struct irq_work *entry)
 {
-       struct perf_event *event = container_of(entry, struct perf_event, pending);
+       struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
        int rctx;
 
-       rctx = perf_swevent_get_recursion_context();
        /*
         * If we 'fail' here, that's OK, it means recursion is already disabled
         * and we won't recurse 'further'.
         */
+       rctx = perf_swevent_get_recursion_context();
 
-       perf_pending_event_disable(event);
-
+       /*
+        * The wakeup isn't bound to the context of the event -- it can happen
+        * irrespective of where the event is.
+        */
        if (event->pending_wakeup) {
                event->pending_wakeup = 0;
                perf_event_wakeup(event);
        }
 
+       __perf_pending_irq(event);
+
        if (rctx >= 0)
                perf_swevent_put_recursion_context(rctx);
 }
 
+static void perf_pending_task(struct callback_head *head)
+{
+       struct perf_event *event = container_of(head, struct perf_event, pending_task);
+       int rctx;
+
+       /*
+        * If we 'fail' here, that's OK, it means recursion is already disabled
+        * and we won't recurse 'further'.
+        */
+       preempt_disable_notrace();
+       rctx = perf_swevent_get_recursion_context();
+
+       if (event->pending_work) {
+               event->pending_work = 0;
+               perf_sigtrap(event);
+               local_dec(&event->ctx->nr_pending);
+       }
+
+       if (rctx >= 0)
+               perf_swevent_put_recursion_context(rctx);
+       preempt_enable_notrace();
+}
+
 #ifdef CONFIG_GUEST_PERF_EVENTS
 struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
 
@@ -9212,8 +9278,8 @@ int perf_event_account_interrupt(struct perf_event *event)
  */
 
 static int __perf_event_overflow(struct perf_event *event,
-                                  int throttle, struct perf_sample_data *data,
-                                  struct pt_regs *regs)
+                                int throttle, struct perf_sample_data *data,
+                                struct pt_regs *regs)
 {
        int events = atomic_read(&event->event_limit);
        int ret = 0;
@@ -9236,24 +9302,36 @@ static int __perf_event_overflow(struct perf_event *event,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
-               event->pending_addr = data->addr;
-
                perf_event_disable_inatomic(event);
        }
 
+       if (event->attr.sigtrap) {
+               /*
+                * Should not be able to return to user space without processing
+                * pending_sigtrap (kernel events can overflow multiple times).
+                */
+               WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
+               if (!event->pending_sigtrap) {
+                       event->pending_sigtrap = 1;
+                       local_inc(&event->ctx->nr_pending);
+               }
+               event->pending_addr = data->addr;
+               irq_work_queue(&event->pending_irq);
+       }
+
        READ_ONCE(event->overflow_handler)(event, data, regs);
 
        if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
-               irq_work_queue(&event->pending);
+               irq_work_queue(&event->pending_irq);
        }
 
        return ret;
 }
 
 int perf_event_overflow(struct perf_event *event,
-                         struct perf_sample_data *data,
-                         struct pt_regs *regs)
+                       struct perf_sample_data *data,
+                       struct pt_regs *regs)
 {
        return __perf_event_overflow(event, 1, data, regs);
 }
@@ -9768,6 +9846,7 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 
        perf_sample_data_init(&data, 0, 0);
        data.raw = &raw;
+       data.sample_flags |= PERF_SAMPLE_RAW;
 
        perf_trace_buf_update(record, event_type);
 
@@ -11570,8 +11649,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
        init_waitqueue_head(&event->waitq);
-       event->pending_disable = -1;
-       init_irq_work(&event->pending, perf_pending_event);
+       init_irq_work(&event->pending_irq, perf_pending_irq);
+       init_task_work(&event->pending_task, perf_pending_task);
 
        mutex_init(&event->mmap_mutex);
        raw_spin_lock_init(&event->addr_filters.lock);
@@ -11593,9 +11672,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        if (parent_event)
                event->event_caps = parent_event->event_caps;
 
-       if (event->attr.sigtrap)
-               atomic_set(&event->event_limit, 1);
-
        if (task) {
                event->attach_state = PERF_ATTACH_TASK;
                /*
index 7261320..273a0fe 100644 (file)
@@ -22,7 +22,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
        atomic_set(&handle->rb->poll, EPOLLIN);
 
        handle->event->pending_wakeup = 1;
-       irq_work_queue(&handle->event->pending);
+       irq_work_queue(&handle->event->pending_irq);
 }
 
 /*
index 460c12b..7971e98 100644 (file)
 
 #define GCOV_TAG_FUNCTION_LENGTH       3
 
+/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */
+#if (__GNUC__ >= 12)
+#define GCOV_UNIT_SIZE                         4
+#else
+#define GCOV_UNIT_SIZE                         1
+#endif
+
 static struct gcov_info *gcov_info_head;
 
 /**
@@ -383,12 +390,18 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
        pos += store_gcov_u32(buffer, pos, info->version);
        pos += store_gcov_u32(buffer, pos, info->stamp);
 
+#if (__GNUC__ >= 12)
+       /* Use zero as checksum of the compilation unit. */
+       pos += store_gcov_u32(buffer, pos, 0);
+#endif
+
        for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
                fi_ptr = info->functions[fi_idx];
 
                /* Function record. */
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-               pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+               pos += store_gcov_u32(buffer, pos,
+                       GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
                pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
                pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
                pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
@@ -402,7 +415,8 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
                        /* Counter record. */
                        pos += store_gcov_u32(buffer, pos,
                                              GCOV_TAG_FOR_COUNTER(ct_idx));
-                       pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+                       pos += store_gcov_u32(buffer, pos,
+                               ci_ptr->num * 2 * GCOV_UNIT_SIZE);
 
                        for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
                                pos += store_gcov_u64(buffer, pos,
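
For clarity, not part of the patch: with GCC >= 12 (GCOV_UNIT_SIZE == 4) the function record length written above becomes GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE = 3 * 4 = 12 bytes, and a counter record with num 64-bit counters becomes num * 2 * 4 = num * 8 bytes. With older compilers the unit is 1, so the same expressions count 4-byte words instead, matching the previous behaviour.
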
index f58a0aa..793c55a 100644 (file)
@@ -645,7 +645,7 @@ static void power_down(void)
        int error;
 
        if (hibernation_mode == HIBERNATION_SUSPEND) {
-               error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+               error = suspend_devices_and_enter(mem_sleep_current);
                if (error) {
                        hibernation_mode = hibernation_ops ?
                                                HIBERNATION_PLATFORM :
index 6bb8e72..93416af 100644 (file)
@@ -1403,30 +1403,32 @@ static void rcu_poll_gp_seq_end(unsigned long *snap)
 // where caller does not hold the root rcu_node structure's lock.
 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
 {
+       unsigned long flags;
        struct rcu_node *rnp = rcu_get_root();
 
        if (rcu_init_invoked()) {
                lockdep_assert_irqs_enabled();
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
        }
        rcu_poll_gp_seq_start(snap);
        if (rcu_init_invoked())
-               raw_spin_unlock_irq_rcu_node(rnp);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 // Make the polled API aware of the end of a grace period, but where
 // caller does not hold the root rcu_node structure's lock.
 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
 {
+       unsigned long flags;
        struct rcu_node *rnp = rcu_get_root();
 
        if (rcu_init_invoked()) {
                lockdep_assert_irqs_enabled();
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
        }
        rcu_poll_gp_seq_end(snap);
        if (rcu_init_invoked())
-               raw_spin_unlock_irq_rcu_node(rnp);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
index 5800b06..cb2aa2b 100644 (file)
@@ -4823,10 +4823,10 @@ static inline void finish_task(struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
-static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
        void (*func)(struct rq *rq);
-       struct callback_head *next;
+       struct balance_callback *next;
 
        lockdep_assert_rq_held(rq);
 
@@ -4853,15 +4853,15 @@ static void balance_push(struct rq *rq);
  * This abuse is tolerated because it places all the unlikely/odd cases behind
  * a single test, namely: rq->balance_callback == NULL.
  */
-struct callback_head balance_push_callback = {
+struct balance_callback balance_push_callback = {
        .next = NULL,
-       .func = (void (*)(struct callback_head *))balance_push,
+       .func = balance_push,
 };
 
-static inline struct callback_head *
+static inline struct balance_callback *
 __splice_balance_callbacks(struct rq *rq, bool split)
 {
-       struct callback_head *head = rq->balance_callback;
+       struct balance_callback *head = rq->balance_callback;
 
        if (likely(!head))
                return NULL;
@@ -4883,7 +4883,7 @@ __splice_balance_callbacks(struct rq *rq, bool split)
        return head;
 }
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
 {
        return __splice_balance_callbacks(rq, true);
 }
@@ -4893,7 +4893,7 @@ static void __balance_callbacks(struct rq *rq)
        do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
 }
 
-static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
        unsigned long flags;
 
@@ -4910,12 +4910,12 @@ static inline void __balance_callbacks(struct rq *rq)
 {
 }
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
 {
        return NULL;
 }
 
-static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
 }
 
@@ -6188,7 +6188,7 @@ static void sched_core_balance(struct rq *rq)
        preempt_enable();
 }
 
-static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
 
 static void queue_core_balance(struct rq *rq)
 {
@@ -7419,7 +7419,7 @@ static int __sched_setscheduler(struct task_struct *p,
        int oldpolicy = -1, policy = attr->sched_policy;
        int retval, oldprio, newprio, queued, running;
        const struct sched_class *prev_class;
-       struct callback_head *head;
+       struct balance_callback *head;
        struct rq_flags rf;
        int reset_on_fork;
        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
index 86dea6a..9ae8f41 100644 (file)
@@ -644,8 +644,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
        return rq->online && dl_task(prev);
 }
 
-static DEFINE_PER_CPU(struct callback_head, dl_push_head);
-static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
+static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
+static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
 
 static void push_dl_tasks(struct rq *);
 static void pull_dl_task(struct rq *);
index d869bcf..ed2a47e 100644 (file)
@@ -410,8 +410,8 @@ static inline int has_pushable_tasks(struct rq *rq)
        return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_push_head);
-static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
+static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
+static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
 static void pull_rt_task(struct rq *);
index 1644242..a4a2004 100644 (file)
@@ -938,6 +938,12 @@ struct uclamp_rq {
 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
 #endif /* CONFIG_UCLAMP_TASK */
 
+struct rq;
+struct balance_callback {
+       struct balance_callback *next;
+       void (*func)(struct rq *rq);
+};
+
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -1036,7 +1042,7 @@ struct rq {
        unsigned long           cpu_capacity;
        unsigned long           cpu_capacity_orig;
 
-       struct callback_head    *balance_callback;
+       struct balance_callback *balance_callback;
 
        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;
@@ -1182,6 +1188,14 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
+#define this_rq()              this_cpu_ptr(&runqueues)
+#define task_rq(p)             cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
+#define raw_rq()               raw_cpu_ptr(&runqueues)
+
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
@@ -1269,7 +1283,7 @@ static inline bool sched_group_cookie_match(struct rq *rq,
                return true;
 
        for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
-               if (sched_core_cookie_match(rq, p))
+               if (sched_core_cookie_match(cpu_rq(cpu), p))
                        return true;
        }
        return false;
@@ -1384,14 +1398,6 @@ static inline void update_idle_core(struct rq *rq)
 static inline void update_idle_core(struct rq *rq) { }
 #endif
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
-#define this_rq()              this_cpu_ptr(&runqueues)
-#define task_rq(p)             cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
-#define raw_rq()               raw_cpu_ptr(&runqueues)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
@@ -1544,7 +1550,7 @@ struct rq_flags {
 #endif
 };
 
-extern struct callback_head balance_push_callback;
+extern struct balance_callback balance_push_callback;
 
 /*
  * Lockdep annotation that avoids accidental unlocks; it's like a
@@ -1724,7 +1730,7 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 static inline void
 queue_balance_callback(struct rq *rq,
-                      struct callback_head *head,
+                      struct balance_callback *head,
                       void (*func)(struct rq *rq))
 {
        lockdep_assert_rq_held(rq);
@@ -1737,7 +1743,7 @@ queue_balance_callback(struct rq *rq,
        if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
                return;
 
-       head->func = (void (*)(struct callback_head *))func;
+       head->func = func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
 }
index 7f5eb29..a995ea1 100644 (file)
@@ -346,8 +346,40 @@ static void put_probe_ref(void)
        mutex_unlock(&blk_probe_mutex);
 }
 
+static int blk_trace_start(struct blk_trace *bt)
+{
+       if (bt->trace_state != Blktrace_setup &&
+           bt->trace_state != Blktrace_stopped)
+               return -EINVAL;
+
+       blktrace_seq++;
+       smp_mb();
+       bt->trace_state = Blktrace_running;
+       raw_spin_lock_irq(&running_trace_lock);
+       list_add(&bt->running_list, &running_trace_list);
+       raw_spin_unlock_irq(&running_trace_lock);
+       trace_note_time(bt);
+
+       return 0;
+}
+
+static int blk_trace_stop(struct blk_trace *bt)
+{
+       if (bt->trace_state != Blktrace_running)
+               return -EINVAL;
+
+       bt->trace_state = Blktrace_stopped;
+       raw_spin_lock_irq(&running_trace_lock);
+       list_del_init(&bt->running_list);
+       raw_spin_unlock_irq(&running_trace_lock);
+       relay_flush(bt->rchan);
+
+       return 0;
+}
+
 static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 {
+       blk_trace_stop(bt);
        synchronize_rcu();
        blk_trace_free(q, bt);
        put_probe_ref();
@@ -362,8 +394,7 @@ static int __blk_trace_remove(struct request_queue *q)
        if (!bt)
                return -EINVAL;
 
-       if (bt->trace_state != Blktrace_running)
-               blk_trace_cleanup(q, bt);
+       blk_trace_cleanup(q, bt);
 
        return 0;
 }
@@ -658,7 +689,6 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 
 static int __blk_trace_startstop(struct request_queue *q, int start)
 {
-       int ret;
        struct blk_trace *bt;
 
        bt = rcu_dereference_protected(q->blk_trace,
@@ -666,36 +696,10 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
        if (bt == NULL)
                return -EINVAL;
 
-       /*
-        * For starting a trace, we can transition from a setup or stopped
-        * trace. For stopping a trace, the state must be running
-        */
-       ret = -EINVAL;
-       if (start) {
-               if (bt->trace_state == Blktrace_setup ||
-                   bt->trace_state == Blktrace_stopped) {
-                       blktrace_seq++;
-                       smp_mb();
-                       bt->trace_state = Blktrace_running;
-                       raw_spin_lock_irq(&running_trace_lock);
-                       list_add(&bt->running_list, &running_trace_list);
-                       raw_spin_unlock_irq(&running_trace_lock);
-
-                       trace_note_time(bt);
-                       ret = 0;
-               }
-       } else {
-               if (bt->trace_state == Blktrace_running) {
-                       bt->trace_state = Blktrace_stopped;
-                       raw_spin_lock_irq(&running_trace_lock);
-                       list_del_init(&bt->running_list);
-                       raw_spin_unlock_irq(&running_trace_lock);
-                       relay_flush(bt->rchan);
-                       ret = 0;
-               }
-       }
-
-       return ret;
+       if (start)
+               return blk_trace_start(bt);
+       else
+               return blk_trace_stop(bt);
 }
 
 int blk_trace_startstop(struct request_queue *q, int start)
@@ -772,10 +776,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 void blk_trace_shutdown(struct request_queue *q)
 {
        if (rcu_dereference_protected(q->blk_trace,
-                                     lockdep_is_held(&q->debugfs_mutex))) {
-               __blk_trace_startstop(q, 0);
+                                     lockdep_is_held(&q->debugfs_mutex)))
                __blk_trace_remove(q);
-       }
 }
 
 #ifdef CONFIG_BLK_CGROUP
@@ -1614,13 +1616,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
        if (bt == NULL)
                return -EINVAL;
 
-       if (bt->trace_state == Blktrace_running) {
-               bt->trace_state = Blktrace_stopped;
-               raw_spin_lock_irq(&running_trace_lock);
-               list_del_init(&bt->running_list);
-               raw_spin_unlock_irq(&running_trace_lock);
-               relay_flush(bt->rchan);
-       }
+       blk_trace_stop(bt);
 
        put_probe_ref();
        synchronize_rcu();
index 49fb9ec..1ed0896 100644 (file)
@@ -687,6 +687,7 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 
        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;
+       sd->sample_flags |= PERF_SAMPLE_RAW;
 
        err = __bpf_perf_event_output(regs, map, flags, sd);
 
@@ -745,6 +746,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
        perf_fetch_caller_regs(regs);
        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;
+       sd->sample_flags |= PERF_SAMPLE_RAW;
 
        ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
index 064072c..f50398c 100644 (file)
@@ -74,6 +74,7 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
 static DEFINE_CTL_TABLE_POLL(hostname_poll);
 static DEFINE_CTL_TABLE_POLL(domainname_poll);
 
+// Note: update 'enum uts_proc' to match any changes to this table
 static struct ctl_table uts_kern_table[] = {
        {
                .procname       = "arch",
index 3fc7abf..2928007 100644 (file)
@@ -400,8 +400,9 @@ config FRAME_WARN
        default 1536 if (!64BIT && XTENSA)
        default 1024 if !64BIT
        default 2048 if 64BIT
+       default 0 if KMSAN
        help
-         Tell gcc to warn at build time for stack frames larger than this.
+         Tell the compiler to warn at build time for stack frames larger than this.
          Setting this too low will cause a lot of warnings.
          Setting it to 0 disables the warning.
 
index f5ae79c..a608746 100644 (file)
@@ -56,8 +56,8 @@ int string_stream_vadd(struct string_stream *stream,
        frag_container = alloc_string_stream_fragment(stream->test,
                                                      len,
                                                      stream->gfp);
-       if (!frag_container)
-               return -ENOMEM;
+       if (IS_ERR(frag_container))
+               return PTR_ERR(frag_container);
 
        len = vsnprintf(frag_container->fragment, len, fmt, args);
        spin_lock(&stream->lock);
index 90640a4..2a6992f 100644 (file)
@@ -265,7 +265,7 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
        kunit_set_failure(test);
 
        stream = alloc_string_stream(test, GFP_KERNEL);
-       if (!stream) {
+       if (IS_ERR(stream)) {
                WARN(true,
                     "Could not allocate stream to print failed assertion in %s:%d\n",
                     loc->file,
index e174380..fbde494 100644 (file)
@@ -2903,8 +2903,8 @@ static inline void *mtree_range_walk(struct ma_state *mas)
        unsigned long max, min;
        unsigned long prev_max, prev_min;
 
-       last = next = mas->node;
-       prev_min = min = mas->min;
+       next = mas->node;
+       min = mas->min;
        max = mas->max;
        do {
                offset = 0;
index 5369634..b8556a2 100644 (file)
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
+#define SKIP(cond, reason)             do {                    \
+       if (cond) {                                             \
+               kunit_skip(test, reason);                       \
+               return;                                         \
+       }                                                       \
+} while (0)
+
+/*
+ * Clang 11 and earlier generate unwanted libcalls for signed output
+ * on unsigned input.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 11
+# define SKIP_SIGN_MISMATCH(t) SKIP(t, "Clang 11 unwanted libcalls")
+#else
+# define SKIP_SIGN_MISMATCH(t) do { } while (0)
+#endif
+
+/*
+ * Clang 13 and earlier generate unwanted libcalls for 64-bit tests on
+ * 32-bit hosts.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 13 &&    \
+    BITS_PER_LONG != 64
+# define SKIP_64_ON_32(t)      SKIP(t, "Clang 13 unwanted libcalls")
+#else
+# define SKIP_64_ON_32(t)      do { } while (0)
+#endif
+
 #define DEFINE_TEST_ARRAY_TYPED(t1, t2, t)                     \
        static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
                t1 a;                                           \
@@ -94,7 +122,6 @@ DEFINE_TEST_ARRAY(u32) = {
        {-4U, 5U, 1U, -9U, -20U, true, false, true},
 };
 
-#if BITS_PER_LONG == 64
 DEFINE_TEST_ARRAY(u64) = {
        {0, 0, 0, 0, 0, false, false, false},
        {1, 1, 2, 0, 1, false, false, false},
@@ -118,7 +145,6 @@ DEFINE_TEST_ARRAY(u64) = {
         false, true, false},
        {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
 };
-#endif
 
 DEFINE_TEST_ARRAY(s8) = {
        {0, 0, 0, 0, 0, false, false, false},
@@ -194,7 +220,6 @@ DEFINE_TEST_ARRAY(s32) = {
        {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
 };
 
-#if BITS_PER_LONG == 64
 DEFINE_TEST_ARRAY(s64) = {
        {0, 0, 0, 0, 0, false, false, false},
 
@@ -223,7 +248,6 @@ DEFINE_TEST_ARRAY(s64) = {
        {-128, -1, -129, -127, 128, false, false, false},
        {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
 };
-#endif
 
 #define check_one_op(t, fmt, op, sym, a, b, r, of) do {                        \
        int _a_orig = a, _a_bump = a + 1;                               \
@@ -246,7 +270,7 @@ DEFINE_TEST_ARRAY(s64) = {
 
 #define DEFINE_TEST_FUNC_TYPED(n, t, fmt)                              \
 static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
-{                                                                      \
+{                                                                      \
        check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of);    \
        check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of);    \
        check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of);   \
@@ -257,6 +281,12 @@ static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
 static void n ## _overflow_test(struct kunit *test) {                  \
        unsigned i;                                                     \
                                                                        \
+       SKIP_64_ON_32(__same_type(t, u64));                             \
+       SKIP_64_ON_32(__same_type(t, s64));                             \
+       SKIP_SIGN_MISMATCH(__same_type(n ## _tests[0].a, u32) &&        \
+                          __same_type(n ## _tests[0].b, u32) &&        \
+                          __same_type(n ## _tests[0].sum, int));       \
+                                                                       \
        for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i)                   \
                do_test_ ## n(test, &n ## _tests[i]);                   \
        kunit_info(test, "%zu %s arithmetic tests finished\n",          \
@@ -272,10 +302,8 @@ DEFINE_TEST_FUNC(u16, "%d");
 DEFINE_TEST_FUNC(s16, "%d");
 DEFINE_TEST_FUNC(u32, "%u");
 DEFINE_TEST_FUNC(s32, "%d");
-#if BITS_PER_LONG == 64
 DEFINE_TEST_FUNC(u64, "%llu");
 DEFINE_TEST_FUNC(s64, "%lld");
-#endif
 
 DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
        {0, 0, 0, 0, 0, false, false, false},
@@ -715,13 +743,10 @@ static struct kunit_case overflow_test_cases[] = {
        KUNIT_CASE(s16_s16__s16_overflow_test),
        KUNIT_CASE(u32_u32__u32_overflow_test),
        KUNIT_CASE(s32_s32__s32_overflow_test),
-/* Clang 13 and earlier generate unwanted libcalls on 32-bit. */
-#if BITS_PER_LONG == 64
        KUNIT_CASE(u64_u64__u64_overflow_test),
        KUNIT_CASE(s64_s64__s64_overflow_test),
-#endif
-       KUNIT_CASE(u32_u32__u8_overflow_test),
        KUNIT_CASE(u32_u32__int_overflow_test),
+       KUNIT_CASE(u32_u32__u8_overflow_test),
        KUNIT_CASE(u8_u8__int_overflow_test),
        KUNIT_CASE(int_int__u8_overflow_test),
        KUNIT_CASE(shift_sane_test),
index b358a74..f2ba578 100644 (file)
@@ -369,18 +369,10 @@ static int __init test_rhltable(unsigned int entries)
        pr_info("test %d random rhlist add/delete operations\n", entries);
        for (j = 0; j < entries; j++) {
                u32 i = prandom_u32_max(entries);
-               u32 prand = get_random_u32();
+               u32 prand = prandom_u32_max(4);
 
                cond_resched();
 
-               if (prand == 0)
-                       prand = get_random_u32();
-
-               if (prand & 1) {
-                       prand >>= 1;
-                       continue;
-               }
-
                err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
                if (test_bit(i, obj_in_table)) {
                        clear_bit(i, obj_in_table);
@@ -393,35 +385,29 @@ static int __init test_rhltable(unsigned int entries)
                }
 
                if (prand & 1) {
-                       prand >>= 1;
-                       continue;
-               }
-
-               err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
-               if (err == 0) {
-                       if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
-                               continue;
-               } else {
-                       if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
-                               continue;
-               }
-
-               if (prand & 1) {
-                       prand >>= 1;
-                       continue;
+                       err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+                       if (err == 0) {
+                               if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
+                                       continue;
+                       } else {
+                               if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
+                                       continue;
+                       }
                }
 
-               i = prandom_u32_max(entries);
-               if (test_bit(i, obj_in_table)) {
-                       err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
-                       WARN(err, "cannot remove element at slot %d", i);
-                       if (err == 0)
-                               clear_bit(i, obj_in_table);
-               } else {
-                       err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
-                       WARN(err, "failed to insert object %d", i);
-                       if (err == 0)
-                               set_bit(i, obj_in_table);
+               if (prand & 2) {
+                       i = prandom_u32_max(entries);
+                       if (test_bit(i, obj_in_table)) {
+                               err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+                               WARN(err, "cannot remove element at slot %d", i);
+                               if (err == 0)
+                                       clear_bit(i, obj_in_table);
+                       } else {
+                               err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+                               WARN(err, "failed to insert object %d", i);
+                               if (err == 0)
+                                       set_bit(i, obj_in_table);
+                       }
                }
        }
 
index 1cc4a5f..561a425 100644 (file)
@@ -2455,7 +2455,16 @@ static void __split_huge_page_tail(struct page *head, int tail,
                        page_tail);
        page_tail->mapping = head->mapping;
        page_tail->index = head->index + tail;
-       page_tail->private = 0;
+
+       /*
+        * page->private should not be set in tail pages with the exception
+        * of swap cache pages that store the swp_entry_t in tail pages.
+        * Fix up and warn once if private is unexpectedly set.
+        */
+       if (!folio_test_swapcache(page_folio(head))) {
+               VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
+               page_tail->private = 0;
+       }
 
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
index b586cdd..546df97 100644 (file)
@@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        /*
         * Clear vm_private_data
+        * - For shared mappings this is a per-vma semaphore that may be
+        *   allocated in a subsequent call to hugetlb_vm_op_open.
+        *   Before clearing, make sure pointer is not associated with vma
+        *   as this will leak the structure.  This is the case when called
+        *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
+        *   been called to allocate a new structure.
         * - For MAP_PRIVATE mappings, this is the reserve map which does
         *   not apply to children.  Faults generated by the children are
         *   not guaranteed to succeed, even if read-only.
-        * - For shared mappings this is a per-vma semaphore that may be
-        *   allocated in a subsequent call to hugetlb_vm_op_open.
         */
-       vma->vm_private_data = (void *)0;
-       if (!(vma->vm_flags & VM_MAYSHARE))
-               return;
+       if (vma->vm_flags & VM_MAYSHARE) {
+               struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+               if (vma_lock && vma_lock->vma != vma)
+                       vma->vm_private_data = NULL;
+       } else
+               vma->vm_private_data = NULL;
 }
 
 /*
@@ -2924,11 +2932,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
                if (!page)
                        goto out_uncharge_cgroup;
+               spin_lock_irq(&hugetlb_lock);
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
                        SetHPageRestoreReserve(page);
                        h->resv_huge_pages--;
                }
-               spin_lock_irq(&hugetlb_lock);
                list_add(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
                /* Fall through */
@@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
        struct resv_map *resv = vma_resv_map(vma);
 
        /*
+        * HPAGE_RESV_OWNER indicates a private mapping.
         * This new VMA should share its siblings reservation map if present.
         * The VMA will only ever have a valid reservation map pointer where
         * it is being copied for another still existing VMA.  As that VMA
@@ -4615,11 +4624,21 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 
        /*
         * vma_lock structure for sharable mappings is vma specific.
-        * Clear old pointer (if copied via vm_area_dup) and create new.
+        * Clear old pointer (if copied via vm_area_dup) and allocate
+        * new structure.  Before clearing, make sure vma_lock is not
+        * for this vma.
         */
        if (vma->vm_flags & VM_MAYSHARE) {
-               vma->vm_private_data = NULL;
-               hugetlb_vma_lock_alloc(vma);
+               struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+               if (vma_lock) {
+                       if (vma_lock->vma != vma) {
+                               vma->vm_private_data = NULL;
+                               hugetlb_vma_lock_alloc(vma);
+                       } else
+                               pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
+               } else
+                       hugetlb_vma_lock_alloc(vma);
        }
 }
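
Both hugetlb hunks above hinge on an ownership check: a vma_lock pointer merely copied from another vma can be dropped (and, in hugetlb_vm_op_open(), replaced with a freshly allocated lock), while a lock this vma already owns must not be discarded, since nothing else would free it. A simplified userspace sketch of that check, using invented stand-in structures rather than the real hugetlb ones:

#include <stdio.h>
#include <stdlib.h>

struct vma;

struct vma_lock {
        struct vma *owner;              /* stands in for vma_lock->vma */
};

struct vma {
        struct vma_lock *private;       /* stands in for vm_private_data */
};

/* keep our own lock, drop one merely copied from another vma */
static void dup_private(struct vma *v)
{
        struct vma_lock *lock = v->private;

        if (lock && lock->owner != v)
                v->private = NULL;
}

int main(void)
{
        struct vma parent = { .private = NULL }, child;

        parent.private = malloc(sizeof(*parent.private));
        if (!parent.private)
                return 1;
        parent.private->owner = &parent;

        child = parent;                 /* like vm_area_dup(): pointer is copied */
        dup_private(&child);            /* child drops the copied pointer */
        dup_private(&parent);           /* parent keeps the lock it owns */

        printf("parent lock kept:   %s\n", parent.private ? "yes" : "no");
        printf("child copy dropped: %s\n", child.private ? "no" : "yes");

        free(parent.private);
        return 0;
}
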
 
index 37af2dc..646e297 100644 (file)
@@ -1461,6 +1461,27 @@ static void scan_gray_list(void)
 }
 
 /*
+ * Conditionally call cond_resched() in an object iteration loop while making sure
+ * that the given object won't go away without RCU read lock by performing a
+ * get_object() if !pinned.
+ *
+ * Return: false if it can't do a cond_resched() due to get_object() failure,
+ *        true otherwise
+ */
+static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
+{
+       if (!pinned && !get_object(object))
+               return false;
+
+       rcu_read_unlock();
+       cond_resched();
+       rcu_read_lock();
+       if (!pinned)
+               put_object(object);
+       return true;
+}
+
+/*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
  * scan_mutex held.
@@ -1471,7 +1492,7 @@ static void kmemleak_scan(void)
        struct zone *zone;
        int __maybe_unused i;
        int new_leaks = 0;
-       int loop1_cnt = 0;
+       int loop_cnt = 0;
 
        jiffies_last_scan = jiffies;
 
@@ -1480,7 +1501,6 @@ static void kmemleak_scan(void)
        list_for_each_entry_rcu(object, &object_list, object_list) {
                bool obj_pinned = false;
 
-               loop1_cnt++;
                raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
                /*
@@ -1514,24 +1534,11 @@ static void kmemleak_scan(void)
                raw_spin_unlock_irq(&object->lock);
 
                /*
-                * Do a cond_resched() to avoid soft lockup every 64k objects.
-                * Make sure a reference has been taken so that the object
-                * won't go away without RCU read lock.
+                * Do a cond_resched() every 64k objects to avoid soft lockup.
                 */
-               if (!(loop1_cnt & 0xffff)) {
-                       if (!obj_pinned && !get_object(object)) {
-                               /* Try the next object instead */
-                               loop1_cnt--;
-                               continue;
-                       }
-
-                       rcu_read_unlock();
-                       cond_resched();
-                       rcu_read_lock();
-
-                       if (!obj_pinned)
-                               put_object(object);
-               }
+               if (!(++loop_cnt & 0xffff) &&
+                   !kmemleak_cond_resched(object, obj_pinned))
+                       loop_cnt--; /* Try again on next object */
        }
        rcu_read_unlock();
 
@@ -1598,8 +1605,16 @@ static void kmemleak_scan(void)
         * scan and color them gray until the next scan.
         */
        rcu_read_lock();
+       loop_cnt = 0;
        list_for_each_entry_rcu(object, &object_list, object_list) {
                /*
+                * Do a cond_resched() every 64k objects to avoid soft lockup.
+                */
+               if (!(++loop_cnt & 0xffff) &&
+                   !kmemleak_cond_resched(object, false))
+                       loop_cnt--;     /* Try again on next object */
+
+               /*
                 * This is racy but we can save the overhead of lock/unlock
                 * calls. The missed objects, if any, should be caught in
                 * the next scan.
@@ -1632,8 +1647,16 @@ static void kmemleak_scan(void)
         * Scanning result reporting.
         */
        rcu_read_lock();
+       loop_cnt = 0;
        list_for_each_entry_rcu(object, &object_list, object_list) {
                /*
+                * Do a cond_resched() every 64k objects to avoid soft lockup.
+                */
+               if (!(++loop_cnt & 0xffff) &&
+                   !kmemleak_cond_resched(object, false))
+                       loop_cnt--;     /* Try again on next object */
+
+               /*
                 * This is racy but we can save the overhead of lock/unlock
                 * calls. The missed objects, if any, should be caught in
                 * the next scan.
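
The kmemleak change factors the resched logic into kmemleak_cond_resched() and applies the same "every 64k objects" throttle to all three object-list walks. The throttle itself is plain counter masking; a small userspace sketch of that idiom, with sched_yield() standing in for cond_resched() and a placeholder work loop:

#include <sched.h>
#include <stdio.h>

int main(void)
{
        unsigned int loop_cnt = 0;
        unsigned long i, yields = 0;

        for (i = 0; i < 1000000; i++) {
                /* ... per-object work would go here ... */

                /* true once every 0x10000 (65536) iterations */
                if (!(++loop_cnt & 0xffff)) {
                        sched_yield();
                        yields++;
                }
        }
        printf("yielded %lu times over %lu iterations\n", yields, i);
        return 0;
}
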
index 280d154..271f135 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "kmsan.h"
 #include <linux/gfp.h>
+#include <linux/kmsan_string.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 
index 21e3e19..a787c04 100644 (file)
@@ -167,6 +167,7 @@ void kmsan_copy_page_meta(struct page *dst, struct page *src)
        __memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
        kmsan_leave_runtime();
 }
+EXPORT_SYMBOL(kmsan_copy_page_meta);
 
 void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
 {
index 2baa93c..c7105ec 100644 (file)
@@ -813,7 +813,14 @@ static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
        if (start & ~huge_page_mask(hstate_vma(vma)))
                return false;
 
-       *end = ALIGN(*end, huge_page_size(hstate_vma(vma)));
+       /*
+        * Madvise callers expect the length to be rounded up to PAGE_SIZE
+        * boundaries, and may be unaware that this VMA uses huge pages.
+        * Avoid unexpected data loss by rounding down the number of
+        * huge pages freed.
+        */
+       *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
+
        return true;
 }
 
@@ -828,6 +835,9 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
        if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
                return -EINVAL;
 
+       if (start == end)
+               return 0;
+
        if (!userfaultfd_remove(vma, start, end)) {
                *prev = NULL; /* mmap_lock has been dropped, prev is stale */
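
The madvise change above rounds the end of the range down to a huge page boundary instead of up, and the follow-up hunk bails out early once start == end. The difference is pure alignment arithmetic; a sketch with the macros reduced to their power-of-two forms and an assumed 2 MiB huge page size (the addresses are arbitrary examples, not from the patch):

#include <stdio.h>

#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long huge = 2UL << 20;         /* 2 MiB huge page */
        unsigned long start = 4 * huge;         /* hugepage-aligned start */
        unsigned long end = start + 4096;       /* caller passed one 4 KiB page */

        printf("rounded up:   end=%#lx (frees a whole huge page)\n",
               ALIGN(end, huge));
        printf("rounded down: end=%#lx (start == end, nothing freed)\n",
               ALIGN_DOWN(end, huge));
        return 0;
}
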
 
index f116b7b..fa8c9d0 100644 (file)
@@ -131,8 +131,8 @@ static void memory_tier_device_release(struct device *dev)
        kfree(tier);
 }
 
-static ssize_t nodes_show(struct device *dev,
-                         struct device_attribute *attr, char *buf)
+static ssize_t nodelist_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        int ret;
        nodemask_t nmask;
@@ -143,10 +143,10 @@ static ssize_t nodes_show(struct device *dev,
        mutex_unlock(&memory_tier_lock);
        return ret;
 }
-static DEVICE_ATTR_RO(nodes);
+static DEVICE_ATTR_RO(nodelist);
 
 static struct attribute *memtier_dev_attrs[] = {
-       &dev_attr_nodes.attr,
+       &dev_attr_nodelist.attr,
        NULL
 };
 
index a937eae..61aa9ae 100644 (file)
@@ -787,17 +787,22 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
 {
-       MA_STATE(mas, &mm->mm_mt, start - 1, start - 1);
+       MA_STATE(mas, &mm->mm_mt, start, start);
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
 
-       prev = mas_find_rev(&mas, 0);
-       if (prev && (start < prev->vm_end))
-               vma = prev;
-       else
-               vma = mas_next(&mas, end - 1);
+       prev = mas_prev(&mas, 0);
+       if (unlikely(!prev))
+               mas_set(&mas, start);
+
+       vma = mas_find(&mas, end - 1);
+       if (WARN_ON(!vma))
+               return 0;
+
+       if (start > vma->vm_start)
+               prev = vma;
 
        for (; vma; vma = mas_next(&mas, end - 1)) {
                unsigned long vmstart = max(start, vma->vm_start);
index 1379e19..dff3335 100644 (file)
@@ -1582,6 +1582,13 @@ out:
         */
        list_splice(&ret_pages, from);
 
+       /*
+        * Return 0 in case all subpages of fail-to-migrate THPs are
+        * migrated successfully.
+        */
+       if (list_empty(from))
+               rc = 0;
+
        count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
        count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
        count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
index bf2122a..2def555 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -618,7 +618,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *expand)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+       struct vm_area_struct *next_next = NULL;        /* uninit var warning */
+       struct vm_area_struct *next = find_vma(mm, vma->vm_end);
        struct vm_area_struct *orig_vma = vma;
        struct address_space *mapping = NULL;
        struct rb_root_cached *root = NULL;
@@ -2625,14 +2626,14 @@ cannot_expand:
                if (error)
                        goto unmap_and_free_vma;
 
-               /* Can addr have changed??
-                *
-                * Answer: Yes, several device drivers can do it in their
-                *         f_op->mmap method. -DaveM
+               /*
+                * Expansion is handled above, merging is handled below.
+                * Drivers should not alter the address of the VMA.
                 */
-               WARN_ON_ONCE(addr != vma->vm_start);
-
-               addr = vma->vm_start;
+               if (WARN_ON((addr != vma->vm_start))) {
+                       error = -EINVAL;
+                       goto close_and_free_vma;
+               }
                mas_reset(&mas);
 
                /*
@@ -2654,7 +2655,6 @@ cannot_expand:
                                vm_area_free(vma);
                                vma = merge;
                                /* Update vm_flags to pick up the change. */
-                               addr = vma->vm_start;
                                vm_flags = vma->vm_flags;
                                goto unmap_writable;
                        }
@@ -2681,7 +2681,7 @@ cannot_expand:
        if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
                error = -ENOMEM;
                if (file)
-                       goto unmap_and_free_vma;
+                       goto close_and_free_vma;
                else
                        goto free_vma;
        }
@@ -2852,6 +2852,9 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                        if (next->vm_flags != vma->vm_flags)
                                goto out;
 
+                       if (start + size <= next->vm_end)
+                               break;
+
                        prev = next;
                }
 
index e20ade8..218b28e 100644 (file)
@@ -807,6 +807,7 @@ static void prep_compound_tail(struct page *head, int tail_idx)
 
        p->mapping = TAIL_MAPPING;
        set_compound_head(p, head);
+       set_page_private(p, 0);
 }
 
 void prep_compound_page(struct page *page, unsigned int order)
@@ -5784,14 +5785,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
                size_t size)
 {
        if (addr) {
-               unsigned long alloc_end = addr + (PAGE_SIZE << order);
-               unsigned long used = addr + PAGE_ALIGN(size);
-
-               split_page(virt_to_page((void *)addr), order);
-               while (used < alloc_end) {
-                       free_page(used);
-                       used += PAGE_SIZE;
-               }
+               unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
+               struct page *page = virt_to_page((void *)addr);
+               struct page *last = page + nr;
+
+               split_page_owner(page, 1 << order);
+               split_page_memcg(page, 1 << order);
+               while (page < --last)
+                       set_page_refcounted(last);
+
+               last = page + (1UL << order);
+               for (page += nr; page < last; page++)
+                       __free_pages_ok(page, 0, FPI_TO_TAIL);
        }
        return (void *)addr;
 }
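
In the reworked make_alloc_exact() above, the order-N block is split, the first nr = DIV_ROUND_UP(size, PAGE_SIZE) pages are kept for the caller, and the remaining pages are freed to the tail of the free lists (FPI_TO_TAIL). A sketch of just that bookkeeping with example numbers; the order and the requested size are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int order = 4;                   /* 16-page block */
        unsigned long size = 9 * PAGE_SIZE + 100; /* caller asked for ~9 pages */
        unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
        unsigned long total = 1UL << order;

        printf("order-%u block: %lu pages total\n", order, total);
        printf("kept for caller: pages [0, %lu)\n", nr);
        printf("freed to tail of free lists: pages [%lu, %lu)\n", nr, total);
        return 0;
}
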
index 04141a9..47fbc16 100644 (file)
@@ -330,7 +330,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
                                      zone->zone_start_pfn);
 
        if (skip_isolation) {
-               int mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
+               int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
 
                VM_BUG_ON(!is_migrate_isolate(mt));
        } else {
index 8280a5c..c1d8b8a 100644 (file)
@@ -2424,9 +2424,26 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
                if (!zeropage) {        /* COPY */
                        page_kaddr = kmap_local_folio(folio, 0);
+                       /*
+                        * The read mmap_lock is held here.  Despite the
+                        * mmap_lock being read recursive, a deadlock is still
+                        * possible if a writer has taken a lock.  For example:
+                        *
+                        * process A thread 1 takes read lock on own mmap_lock
+                        * process A thread 2 calls mmap, blocks taking write lock
+                        * process B thread 1 takes page fault, read lock on own mmap lock
+                        * process B thread 2 calls mmap, blocks taking write lock
+                        * process A thread 1 blocks taking read lock on process B
+                        * process B thread 1 blocks taking read lock on process A
+                        *
+                        * Disable page faults to prevent potential deadlock
+                        * and retry the copy outside the mmap_lock.
+                        */
+                       pagefault_disable();
                        ret = copy_from_user(page_kaddr,
                                             (const void __user *)src_addr,
                                             PAGE_SIZE);
+                       pagefault_enable();
                        kunmap_local(page_kaddr);
 
                        /* fallback to copy_from_user outside mmap_lock */
index e24e8a4..3d0fef3 100644 (file)
@@ -157,11 +157,28 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
                if (!page)
                        goto out;
 
-               page_kaddr = kmap_atomic(page);
+               page_kaddr = kmap_local_page(page);
+               /*
+                * The read mmap_lock is held here.  Despite the
+                * mmap_lock being read recursive, a deadlock is still
+                * possible if a writer has taken a lock.  For example:
+                *
+                * process A thread 1 takes read lock on own mmap_lock
+                * process A thread 2 calls mmap, blocks taking write lock
+                * process B thread 1 takes page fault, read lock on own mmap lock
+                * process B thread 2 calls mmap, blocks taking write lock
+                * process A thread 1 blocks taking read lock on process B
+                * process B thread 1 blocks taking read lock on process A
+                *
+                * Disable page faults to prevent potential deadlock
+                * and retry the copy outside the mmap_lock.
+                */
+               pagefault_disable();
                ret = copy_from_user(page_kaddr,
                                     (const void __user *) src_addr,
                                     PAGE_SIZE);
-               kunmap_atomic(page_kaddr);
+               pagefault_enable();
+               kunmap_local(page_kaddr);
 
                /* fallback to copy_from_user outside mmap_lock */
                if (unlikely(ret)) {
@@ -646,11 +663,11 @@ retry:
                        mmap_read_unlock(dst_mm);
                        BUG_ON(!page);
 
-                       page_kaddr = kmap(page);
+                       page_kaddr = kmap_local_page(page);
                        err = copy_from_user(page_kaddr,
                                             (const void __user *) src_addr,
                                             PAGE_SIZE);
-                       kunmap(page);
+                       kunmap_local(page_kaddr);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
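
The shmem and userfaultfd hunks above apply one rule: never take a potentially blocking page fault while the mmap_lock is read-held, because a writer queued on either task's mmap_lock can turn the recursive read into the cross-process deadlock spelled out in the comments; instead faults are disabled, the copy is allowed to fail, and it is retried after the lock is dropped. Below is a loose userspace analogue of that "fail fast under the lock, block only outside it" pattern (build with -pthread). The names and locks are invented, and a trylock stands in for the pagefault_disable()/copy_from_user() step; it is not the kernel mechanism.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t slow_resource = PTHREAD_MUTEX_INITIALIZER;

static void copy_with_fallback(char *dst, const char *src, size_t len)
{
        pthread_rwlock_rdlock(&map_lock);
        if (pthread_mutex_trylock(&slow_resource) == 0) {
                memcpy(dst, src, len);                  /* fast path, no blocking */
                pthread_mutex_unlock(&slow_resource);
                pthread_rwlock_unlock(&map_lock);
                return;
        }
        pthread_rwlock_unlock(&map_lock);               /* never block under map_lock */

        pthread_mutex_lock(&slow_resource);             /* blocking retry, lock dropped */
        memcpy(dst, src, len);
        pthread_mutex_unlock(&slow_resource);
}

int main(void)
{
        char dst[8];

        copy_with_fallback(dst, "retry", 6);
        printf("%s\n", dst);
        return 0;
}
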
index 5257587..d03941c 100644 (file)
@@ -2311,6 +2311,9 @@ void zs_destroy_pool(struct zs_pool *pool)
                int fg;
                struct size_class *class = pool->size_class[i];
 
+               if (!class)
+                       continue;
+
                if (class->index != i)
                        continue;
 
index 829db9e..aaf64b9 100644 (file)
@@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
        if (!page)
                return -ENOMEM;
 
-       for (p = page, len = 0; len < nbytes; p++, len++) {
+       for (p = page, len = 0; len < nbytes; p++) {
                if (get_user(*p, buff++)) {
                        free_page((unsigned long)page);
                        return -EFAULT;
                }
+               len += 1;
                if (*p == '\0' || *p == '\n')
                        break;
        }
index d7d86c9..55f29c9 100644 (file)
@@ -342,10 +342,12 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
                __skb_unlink(do_skb, &session->skb_queue);
                /* drop ref taken in j1939_session_skb_queue() */
                skb_unref(do_skb);
+               spin_unlock_irqrestore(&session->skb_queue.lock, flags);
 
                kfree_skb(do_skb);
+       } else {
+               spin_unlock_irqrestore(&session->skb_queue.lock, flags);
        }
-       spin_unlock_irqrestore(&session->skb_queue.lock, flags);
 }
 
 void j1939_session_skb_queue(struct j1939_session *session,
index fa53830..3be2560 100644 (file)
@@ -5136,11 +5136,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        case TC_ACT_SHOT:
                mini_qdisc_qstats_cpu_drop(miniq);
                kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
+               *ret = NET_RX_DROP;
                return NULL;
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                consume_skb(skb);
+               *ret = NET_RX_SUCCESS;
                return NULL;
        case TC_ACT_REDIRECT:
                /* skb_mac_header check was done by cls/act_bpf, so
@@ -5153,8 +5155,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
                        *another = true;
                        break;
                }
+               *ret = NET_RX_SUCCESS;
                return NULL;
        case TC_ACT_CONSUMED:
+               *ret = NET_RX_SUCCESS;
                return NULL;
        default:
                break;
index 0ec2f59..f64654d 100644 (file)
@@ -117,6 +117,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
 
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
+       struct net_generic *ng;
        int err = -ENOMEM;
        void *data = NULL;
 
@@ -135,7 +136,13 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
        if (!err)
                return 0;
 
+       if (ops->id && ops->size) {
 cleanup:
+               ng = rcu_dereference_protected(net->gen,
+                                              lockdep_is_held(&pernet_ops_rwsem));
+               ng->ptr[*ops->id] = NULL;
+       }
+
        kfree(data);
 
 out:
index 1d9719e..d1a3fa6 100644 (file)
@@ -3971,7 +3971,7 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
        } else if (i < MAX_SKB_FRAGS) {
                skb_zcopy_downgrade_managed(skb);
                get_page(page);
-               skb_fill_page_desc(skb, i, page, offset, size);
+               skb_fill_page_desc_noacc(skb, i, page, offset, size);
        } else {
                return -EMSGSIZE;
        }
index ca70525..1efdc47 100644 (file)
@@ -500,11 +500,11 @@ bool sk_msg_is_readable(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
 
-static struct sk_msg *alloc_sk_msg(void)
+static struct sk_msg *alloc_sk_msg(gfp_t gfp)
 {
        struct sk_msg *msg;
 
-       msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
+       msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
        if (unlikely(!msg))
                return NULL;
        sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
@@ -520,7 +520,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;
 
-       return alloc_sk_msg();
+       return alloc_sk_msg(GFP_KERNEL);
 }
 
 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
@@ -597,7 +597,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len)
 {
-       struct sk_msg *msg = alloc_sk_msg();
+       struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
        struct sock *sk = psock->sk;
        int err;
 
index 5daa1fa..fb90e1e 100644 (file)
@@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida);
 static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
                               struct sock_reuseport *reuse, bool bind_inany);
 
+void reuseport_has_conns_set(struct sock *sk)
+{
+       struct sock_reuseport *reuse;
+
+       if (!rcu_access_pointer(sk->sk_reuseport_cb))
+               return;
+
+       spin_lock_bh(&reuseport_lock);
+       reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+                                         lockdep_is_held(&reuseport_lock));
+       if (likely(reuse))
+               reuse->has_conns = 1;
+       spin_unlock_bh(&reuseport_lock);
+}
+EXPORT_SYMBOL(reuseport_has_conns_set);
+
 static int reuseport_sock_index(struct sock *sk,
                                const struct sock_reuseport *reuse,
                                bool closed)
index 1a59918..a9fde48 100644 (file)
@@ -3145,7 +3145,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
        case NETDEV_CHANGELOWERSTATE: {
                struct netdev_notifier_changelowerstate_info *info = ptr;
                struct dsa_port *dp;
-               int err;
+               int err = 0;
 
                if (dsa_slave_dev_check(dev)) {
                        dp = dsa_slave_to_port(dev);
index 1c94bb8..49c0a2a 100644 (file)
@@ -124,7 +124,7 @@ static int eeprom_prepare_data(const struct ethnl_req_info *req_base,
        if (ret)
                goto err_free;
 
-       ret = get_module_eeprom_by_page(dev, &page_data, info->extack);
+       ret = get_module_eeprom_by_page(dev, &page_data, info ? info->extack : NULL);
        if (ret < 0)
                goto err_ops;
 
index 5a471e1..e8683e4 100644 (file)
@@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
        if (ret < 0)
                return ret;
 
-       ret = pse_get_pse_attributes(dev, info->extack, data);
+       ret = pse_get_pse_attributes(dev, info ? info->extack : NULL, data);
 
        ethnl_ops_complete(dev);
 
index 5bf3577..a50429a 100644 (file)
@@ -150,15 +150,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
                                       struct hsr_port *port)
 {
        if (!frame->skb_std) {
-               if (frame->skb_hsr) {
+               if (frame->skb_hsr)
                        frame->skb_std =
                                create_stripped_skb_hsr(frame->skb_hsr, frame);
-               } else {
-                       /* Unexpected */
-                       WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
-                                 __FILE__, __LINE__, port->dev->name);
+               else
+                       netdev_warn_once(port->dev,
+                                        "Unexpected frame received in hsr_get_untagged_frame()\n");
+
+               if (!frame->skb_std)
                        return NULL;
-               }
        }
 
        return skb_clone(frame->skb_std, GFP_ATOMIC);
index 6e55fae..1fa2fe0 100644 (file)
@@ -502,8 +502,10 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
        if (err < 0)
                goto out;
 
-       if (addr->family != AF_IEEE802154)
+       if (addr->family != AF_IEEE802154) {
+               err = -EINVAL;
                goto out;
+       }
 
        ieee802154_addr_from_sa(&haddr, &addr->addr);
        dev = ieee802154_get_dev(sock_net(sk), &haddr);
index 0ee7fd2..4d1af0c 100644 (file)
@@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        }
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
-       reuseport_has_conns(sk, true);
+       reuseport_has_conns_set(sk);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
        inet->inet_id = get_random_u16();
index 943edf4..f361d3d 100644 (file)
@@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        dev_match = dev_match || (res.type == RTN_LOCAL &&
                                  dev == net->loopback_dev);
        if (dev_match) {
-               ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
+               ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
                return ret;
        }
        if (no_addr)
@@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        ret = 0;
        if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
                if (res.type == RTN_UNICAST)
-                       ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
+                       ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
        }
        return ret;
 
index e9a7f70..f721c30 100644 (file)
@@ -1231,7 +1231,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
 
        nh->fib_nh_dev = in_dev->dev;
        netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
-       nh->fib_nh_scope = RT_SCOPE_LINK;
+       nh->fib_nh_scope = RT_SCOPE_HOST;
        if (!netif_carrier_ok(nh->fib_nh_dev))
                nh->fib_nh_flags |= RTNH_F_LINKDOWN;
        err = 0;
index ff85db5..ded5bef 100644 (file)
@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
        flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
+       flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
 
        return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
 }
index e886147..fc65d69 100644 (file)
@@ -65,6 +65,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        struct flowi4 fl4 = {
                .flowi4_scope = RT_SCOPE_UNIVERSE,
                .flowi4_iif = LOOPBACK_IFINDEX,
+               .flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
        };
        const struct net_device *oif;
        const struct net_device *found;
index 853a75a..d8ef053 100644 (file)
@@ -2534,7 +2534,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
        if (!err) {
                nh->nh_flags = fib_nh->fib_nh_flags;
                fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
-                                         fib_nh->fib_nh_scope);
+                                         !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
        } else {
                fib_nh_release(net, fib_nh);
        }
index f823281..ef14efa 100644 (file)
@@ -457,6 +457,7 @@ void tcp_init_sock(struct sock *sk)
        WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
        WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
 
+       set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
        sk_sockets_allocated_inc(sk);
 }
 EXPORT_SYMBOL(tcp_init_sock);
index bc2ea12..0640453 100644 (file)
@@ -2192,7 +2192,8 @@ void tcp_enter_loss(struct sock *sk)
  */
 static bool tcp_check_sack_reneging(struct sock *sk, int flag)
 {
-       if (flag & FLAG_SACK_RENEGING) {
+       if (flag & FLAG_SACK_RENEGING &&
+           flag & FLAG_SND_UNA_ADVANCED) {
                struct tcp_sock *tp = tcp_sk(sk);
                unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
                                          msecs_to_jiffies(10));
index 7a250ef..87d440f 100644 (file)
@@ -1874,11 +1874,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
        __skb_push(skb, hdrlen);
 
 no_coalesce:
+       limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
+
        /* Only socket owner can try to collapse/prune rx queues
         * to reduce memory overhead, so add a little headroom here.
         * Few sockets backlog are possibly concurrently non empty.
         */
-       limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
+       limit += 64 * 1024;
 
        if (unlikely(sk_add_backlog(sk, skb, limit))) {
                bh_unlock_sock(sk);
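
The tcp_add_backlog() change above recomputes the backlog limit as u32 and halves sk_sndbuf, so that extreme rcvbuf/sndbuf settings cannot overflow the signed sum the old code used. A small sketch of that arithmetic with assumed extreme values; the old expression's overflow is only shown via a 64-bit comparison, since actually evaluating it in int would be undefined behaviour:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
        int rcvbuf = INT_MAX / 2 + 1000;        /* e.g. a huge tcp_rmem[2] */
        int sndbuf = INT_MAX / 2 + 1000;        /* e.g. a huge tcp_wmem[2] */
        int64_t old_sum = (int64_t)rcvbuf + sndbuf + 64 * 1024;
        uint32_t new_limit = (uint32_t)rcvbuf + (uint32_t)(sndbuf >> 1) + 64 * 1024;

        if (old_sum > INT_MAX)
                printf("old int expression would overflow (%lld > INT_MAX)\n",
                       (long long)old_sum);
        printf("new u32 limit: %u\n", new_limit);
        return 0;
}
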
index 662d717..6a320a6 100644 (file)
@@ -448,7 +448,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
                        result = lookup_reuseport(net, sk, skb,
                                                  saddr, sport, daddr, hnum);
                        /* Fall back to scoring if group has connections */
-                       if (result && !reuseport_has_conns(sk, false))
+                       if (result && !reuseport_has_conns(sk))
                                return result;
 
                        result = result ? : sk;
@@ -1624,6 +1624,7 @@ int udp_init_sock(struct sock *sk)
 {
        skb_queue_head_init(&udp_sk(sk)->reader_queue);
        sk->sk_destruct = udp_destruct_sock;
+       set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
        return 0;
 }
 
index 417834b..9c3f520 100644 (file)
@@ -7214,9 +7214,11 @@ err_reg_dflt:
        __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
 err_reg_all:
        kfree(dflt);
+       net->ipv6.devconf_dflt = NULL;
 #endif
 err_alloc_dflt:
        kfree(all);
+       net->ipv6.devconf_all = NULL;
 err_alloc_all:
        kfree(net->ipv6.inet6_addr_lst);
 err_alloc_addr:
index df665d4..5ecb565 100644 (file)
@@ -256,7 +256,7 @@ ipv4_connected:
                goto out;
        }
 
-       reuseport_has_conns(sk, true);
+       reuseport_has_conns_set(sk);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
index 48b4ff0..c035a96 100644 (file)
@@ -1175,14 +1175,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
                                dev->needed_headroom = dst_len;
 
                        if (set_mtu) {
-                               dev->mtu = rt->dst.dev->mtu - t_hlen;
+                               int mtu = rt->dst.dev->mtu - t_hlen;
+
                                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-                                       dev->mtu -= 8;
+                                       mtu -= 8;
                                if (dev->type == ARPHRD_ETHER)
-                                       dev->mtu -= ETH_HLEN;
+                                       mtu -= ETH_HLEN;
 
-                               if (dev->mtu < IPV6_MIN_MTU)
-                                       dev->mtu = IPV6_MIN_MTU;
+                               if (mtu < IPV6_MIN_MTU)
+                                       mtu = IPV6_MIN_MTU;
+                               WRITE_ONCE(dev->mtu, mtu);
                        }
                }
                ip6_rt_put(rt);
index cc5d5e7..2fb4c6a 100644 (file)
@@ -1450,8 +1450,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
        struct net_device *tdev = NULL;
        struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
-       unsigned int mtu;
        int t_hlen;
+       int mtu;
 
        __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -1498,12 +1498,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
                        dev->hard_header_len = tdev->hard_header_len + t_hlen;
                        mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
 
-                       dev->mtu = mtu - t_hlen;
+                       mtu = mtu - t_hlen;
                        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-                               dev->mtu -= 8;
+                               mtu -= 8;
 
-                       if (dev->mtu < IPV6_MIN_MTU)
-                               dev->mtu = IPV6_MIN_MTU;
+                       if (mtu < IPV6_MIN_MTU)
+                               mtu = IPV6_MIN_MTU;
+                       WRITE_ONCE(dev->mtu, mtu);
                }
        }
 }
index 69d86b0..a01d9b8 100644 (file)
@@ -40,6 +40,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
                .flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev),
                .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
                .flowi6_proto = iph->nexthdr,
+               .flowi6_uid = sock_net_uid(net, NULL),
                .daddr = iph->saddr,
        };
        int lookup_flags;
index 91faac6..36dc14b 100644 (file)
@@ -66,6 +66,7 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
        struct flowi6 fl6 = {
                .flowi6_iif = LOOPBACK_IFINDEX,
                .flowi6_proto = pkt->tprot,
+               .flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
        };
        u32 ret = 0;
 
@@ -163,6 +164,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
        struct flowi6 fl6 = {
                .flowi6_iif = LOOPBACK_IFINDEX,
                .flowi6_proto = pkt->tprot,
+               .flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
        };
        struct rt6_info *rt;
        int lookup_flags;
index d27683e..5703d3c 100644 (file)
@@ -1124,10 +1124,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 
        if (tdev && !netif_is_l3_master(tdev)) {
                int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+               int mtu;
 
-               dev->mtu = tdev->mtu - t_hlen;
-               if (dev->mtu < IPV6_MIN_MTU)
-                       dev->mtu = IPV6_MIN_MTU;
+               mtu = tdev->mtu - t_hlen;
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+               WRITE_ONCE(dev->mtu, mtu);
        }
 }
 
index 8d09f0e..129ec5a 100644 (file)
@@ -195,7 +195,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
                        result = lookup_reuseport(net, sk, skb,
                                                  saddr, sport, daddr, hnum);
                        /* Fall back to scoring if group has connections */
-                       if (result && !reuseport_has_conns(sk, false))
+                       if (result && !reuseport_has_conns(sk))
                                return result;
 
                        result = result ? : sk;
index 2772546..a500422 100644 (file)
@@ -162,7 +162,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
        /* Buffer limit is okay now, add to ready list */
        list_add_tail(&kcm->wait_rx_list,
                      &kcm->mux->kcm_rx_waiters);
-       kcm->rx_wait = true;
+       /* paired with lockless reads in kcm_rfree() */
+       WRITE_ONCE(kcm->rx_wait, true);
 }
 
 static void kcm_rfree(struct sk_buff *skb)
@@ -178,7 +179,7 @@ static void kcm_rfree(struct sk_buff *skb)
        /* For reading rx_wait and rx_psock without holding lock */
        smp_mb__after_atomic();
 
-       if (!kcm->rx_wait && !kcm->rx_psock &&
+       if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
            sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
                spin_lock_bh(&mux->rx_lock);
                kcm_rcv_ready(kcm);
@@ -237,7 +238,8 @@ try_again:
                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Should mean socket buffer full */
                        list_del(&kcm->wait_rx_list);
-                       kcm->rx_wait = false;
+                       /* paired with lockless reads in kcm_rfree() */
+                       WRITE_ONCE(kcm->rx_wait, false);
 
                        /* Commit rx_wait to read in kcm_free */
                        smp_wmb();
@@ -280,10 +282,12 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
        kcm = list_first_entry(&mux->kcm_rx_waiters,
                               struct kcm_sock, wait_rx_list);
        list_del(&kcm->wait_rx_list);
-       kcm->rx_wait = false;
+       /* paired with lockless reads in kcm_rfree() */
+       WRITE_ONCE(kcm->rx_wait, false);
 
        psock->rx_kcm = kcm;
-       kcm->rx_psock = psock;
+       /* paired with lockless reads in kcm_rfree() */
+       WRITE_ONCE(kcm->rx_psock, psock);
 
        spin_unlock_bh(&mux->rx_lock);
 
@@ -310,7 +314,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
        spin_lock_bh(&mux->rx_lock);
 
        psock->rx_kcm = NULL;
-       kcm->rx_psock = NULL;
+       /* paired with lockless reads in kcm_rfree() */
+       WRITE_ONCE(kcm->rx_psock, NULL);
 
        /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
         * kcm_rfree
@@ -834,7 +839,7 @@ static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
        }
 
        get_page(page);
-       skb_fill_page_desc(skb, i, page, offset, size);
+       skb_fill_page_desc_noacc(skb, i, page, offset, size);
        skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 
 coalesced:
@@ -1240,7 +1245,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm)
        if (!kcm->rx_psock) {
                if (kcm->rx_wait) {
                        list_del(&kcm->wait_rx_list);
-                       kcm->rx_wait = false;
+                       /* paired with lockless reads in kcm_rfree() */
+                       WRITE_ONCE(kcm->rx_wait, false);
                }
 
                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
@@ -1793,7 +1799,8 @@ static void kcm_done(struct kcm_sock *kcm)
 
        if (kcm->rx_wait) {
                list_del(&kcm->wait_rx_list);
-               kcm->rx_wait = false;
+               /* paired with lockless reads in kcm_rfree() */
+               WRITE_ONCE(kcm->rx_wait, false);
        }
        /* Move any pending receive messages to other kcm sockets */
        requeue_rx_msgs(mux, &sk->sk_receive_queue);
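
The kcm hunks above pair every lockless read of kcm->rx_wait and kcm->rx_psock in kcm_rfree() with WRITE_ONCE() at the writers. As a rough userspace analogue only (this is not the kernel API), C11 relaxed atomics give the basic guarantee those annotations are after: each marked access is a single memory operation the compiler must actually perform and cannot tear or fuse. A minimal sketch with an invented flag and waiter thread (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool rx_wait = false;

static void *waiter(void *arg)
{
        (void)arg;
        /* lockless reader: a fresh atomic load on every iteration */
        while (!atomic_load_explicit(&rx_wait, memory_order_relaxed))
                ;
        printf("reader observed rx_wait=true\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        /* marked writer: pairs with the marked loads in waiter() */
        atomic_store_explicit(&rx_wait, true, memory_order_relaxed);
        pthread_join(t, NULL);
        return 0;
}
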
index c439125..726b47a 100644 (file)
@@ -132,7 +132,7 @@ static int
 ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
 {
        int hlen;
-       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+       struct ieee802154_mac_cb *cb = mac_cb(skb);
 
        skb_reset_mac_header(skb);
 
@@ -294,8 +294,9 @@ void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
 {
        struct ieee802154_local *local = hw_to_local(hw);
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
-       mac_cb(skb)->lqi = lqi;
+       cb->lqi = lqi;
        skb->pkt_type = IEEE802154_RX_MSG;
        skb_queue_tail(&local->skb_queue, skb);
        tasklet_schedule(&local->tasklet);
index f599ad4..b6dc6e2 100644 (file)
@@ -1673,6 +1673,37 @@ static void mptcp_set_nospace(struct sock *sk)
        set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
 }
 
+static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
+                                 size_t len, int *copied_syn)
+{
+       unsigned int saved_flags = msg->msg_flags;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       int ret;
+
+       lock_sock(ssk);
+       msg->msg_flags |= MSG_DONTWAIT;
+       msk->connect_flags = O_NONBLOCK;
+       msk->is_sendmsg = 1;
+       ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
+       msk->is_sendmsg = 0;
+       msg->msg_flags = saved_flags;
+       release_sock(ssk);
+
+       /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
+       if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
+               ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+                                           msg->msg_namelen, msg->msg_flags, 1);
+
+               /* Keep the same behaviour of plain TCP: zero the copied bytes in
+                * case of any error, except timeout or signal
+                */
+               if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
+                       *copied_syn = 0;
+       }
+
+       return ret;
+}
+
 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1693,23 +1724,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        ssock = __mptcp_nmpc_socket(msk);
        if (unlikely(ssock && inet_sk(ssock->sk)->defer_connect)) {
-               struct sock *ssk = ssock->sk;
                int copied_syn = 0;
 
-               lock_sock(ssk);
-
-               ret = tcp_sendmsg_fastopen(ssk, msg, &copied_syn, len, NULL);
+               ret = mptcp_sendmsg_fastopen(sk, ssock->sk, msg, len, &copied_syn);
                copied += copied_syn;
-               if (ret == -EINPROGRESS && copied_syn > 0) {
-                       /* reflect the new state on the MPTCP socket */
-                       inet_sk_state_store(sk, inet_sk_state_load(ssk));
-                       release_sock(ssk);
+               if (ret == -EINPROGRESS && copied_syn > 0)
                        goto out;
-               } else if (ret) {
-                       release_sock(ssk);
+               else if (ret)
                        goto do_error;
-               }
-               release_sock(ssk);
        }
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -2952,7 +2974,7 @@ static void mptcp_close(struct sock *sk, long timeout)
        sock_put(sk);
 }
 
-static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
+void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
@@ -3507,10 +3529,73 @@ static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
        return put_user(answ, (int __user *)arg);
 }
 
+static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+                                        struct mptcp_subflow_context *subflow)
+{
+       subflow->request_mptcp = 0;
+       __mptcp_do_fallback(msk);
+}
+
+static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct socket *ssock;
+       int err = -EINVAL;
+
+       ssock = __mptcp_nmpc_socket(msk);
+       if (!ssock)
+               return -EINVAL;
+
+       mptcp_token_destroy(msk);
+       inet_sk_state_store(sk, TCP_SYN_SENT);
+       subflow = mptcp_subflow_ctx(ssock->sk);
+#ifdef CONFIG_TCP_MD5SIG
+       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+        * TCP option space.
+        */
+       if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
+               mptcp_subflow_early_fallback(msk, subflow);
+#endif
+       if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
+               MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
+               mptcp_subflow_early_fallback(msk, subflow);
+       }
+       if (likely(!__mptcp_check_fallback(msk)))
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
+
+       /* if reaching here via the fastopen/sendmsg path, the caller already
+        * acquired the subflow socket lock, too.
+        */
+       if (msk->is_sendmsg)
+               err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
+       else
+               err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
+       inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
+
+       /* on successful connect, the msk state will be moved to established by
+        * subflow_finish_connect()
+        */
+       if (unlikely(err && err != -EINPROGRESS)) {
+               inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
+               return err;
+       }
+
+       mptcp_copy_inaddrs(sk, ssock->sk);
+
+       /* non-blocking connect: the mptcp-level inet_stream_connect will error out
+        * without changing the socket state, so update it here.
+        */
+       if (err == -EINPROGRESS)
+               sk->sk_socket->state = ssock->state;
+       return err;
+}
+
 static struct proto mptcp_prot = {
        .name           = "MPTCP",
        .owner          = THIS_MODULE,
        .init           = mptcp_init_sock,
+       .connect        = mptcp_connect,
        .disconnect     = mptcp_disconnect,
        .close          = mptcp_close,
        .accept         = mptcp_accept,
@@ -3562,78 +3647,16 @@ unlock:
        return err;
 }
 
-static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
-                                        struct mptcp_subflow_context *subflow)
-{
-       subflow->request_mptcp = 0;
-       __mptcp_do_fallback(msk);
-}
-
 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                                int addr_len, int flags)
 {
-       struct mptcp_sock *msk = mptcp_sk(sock->sk);
-       struct mptcp_subflow_context *subflow;
-       struct socket *ssock;
-       int err = -EINVAL;
+       int ret;
 
        lock_sock(sock->sk);
-       if (uaddr) {
-               if (addr_len < sizeof(uaddr->sa_family))
-                       goto unlock;
-
-               if (uaddr->sa_family == AF_UNSPEC) {
-                       err = mptcp_disconnect(sock->sk, flags);
-                       sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
-                       goto unlock;
-               }
-       }
-
-       if (sock->state != SS_UNCONNECTED && msk->subflow) {
-               /* pending connection or invalid state, let existing subflow
-                * cope with that
-                */
-               ssock = msk->subflow;
-               goto do_connect;
-       }
-
-       ssock = __mptcp_nmpc_socket(msk);
-       if (!ssock)
-               goto unlock;
-
-       mptcp_token_destroy(msk);
-       inet_sk_state_store(sock->sk, TCP_SYN_SENT);
-       subflow = mptcp_subflow_ctx(ssock->sk);
-#ifdef CONFIG_TCP_MD5SIG
-       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
-        * TCP option space.
-        */
-       if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
-               mptcp_subflow_early_fallback(msk, subflow);
-#endif
-       if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
-               MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
-               mptcp_subflow_early_fallback(msk, subflow);
-       }
-       if (likely(!__mptcp_check_fallback(msk)))
-               MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);
-
-do_connect:
-       err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
-       inet_sk(sock->sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
-       sock->state = ssock->state;
-
-       /* on successful connect, the msk state will be moved to established by
-        * subflow_finish_connect()
-        */
-       if (!err || err == -EINPROGRESS)
-               mptcp_copy_inaddrs(sock->sk, ssock->sk);
-       else
-               inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
-
-unlock:
+       mptcp_sk(sock->sk)->connect_flags = flags;
+       ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
        release_sock(sock->sk);
-       return err;
+       return ret;
 }
 
 static int mptcp_listen(struct socket *sock, int backlog)
@@ -3699,7 +3722,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
                if (mptcp_is_fully_established(newsk))
                        mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
 
-               mptcp_copy_inaddrs(newsk, msk->first);
                mptcp_rcv_space_init(msk, msk->first);
                mptcp_propagate_sndbuf(newsk, msk->first);
 
index c0b5b46..6a09ab9 100644 (file)
@@ -285,7 +285,9 @@ struct mptcp_sock {
        u8              mpc_endpoint_id;
        u8              recvmsg_inq:1,
                        cork:1,
-                       nodelay:1;
+                       nodelay:1,
+                       is_sendmsg:1;
+       int             connect_flags;
        struct work_struct work;
        struct sk_buff  *ooo_last_skb;
        struct rb_root  out_of_order_queue;
@@ -599,6 +601,7 @@ int mptcp_is_checksum_enabled(const struct net *net);
 int mptcp_allow_join_id0(const struct net *net);
 unsigned int mptcp_stale_loss_cnt(const struct net *net);
 int mptcp_get_pm_type(const struct net *net);
+void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk);
 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
                                     struct mptcp_options_received *mp_opt);
 bool __mptcp_retransmit_pending_data(struct sock *sk);
index 07dd23d..02a54d5 100644 (file)
@@ -723,6 +723,8 @@ create_child:
                                goto dispose_child;
                        }
 
+                       if (new_msk)
+                               mptcp_copy_inaddrs(new_msk, child);
                        subflow_drop_ctx(child);
                        goto out;
                }
@@ -750,6 +752,11 @@ create_child:
                        ctx->conn = new_msk;
                        new_msk = NULL;
 
+                       /* set msk addresses early to ensure mptcp_pm_get_local_id()
+                        * uses the correct data
+                        */
+                       mptcp_copy_inaddrs(ctx->conn, child);
+
                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
index a0653a8..58d9cbc 100644 (file)
@@ -5865,8 +5865,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
                          (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
                if (flags & NFT_SET_ELEM_INTERVAL_END)
                        return false;
-               if (!nla[NFTA_SET_ELEM_KEY_END] &&
-                   !(flags & NFT_SET_ELEM_CATCHALL))
+
+               if (nla[NFTA_SET_ELEM_KEY_END] &&
+                   flags & NFT_SET_ELEM_CATCHALL)
                        return false;
        } else {
                if (nla[NFTA_SET_ELEM_KEY_END])
index 39b7c00..3e16527 100644 (file)
@@ -78,10 +78,29 @@ static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
 static unsigned long *mc_groups = &mc_group_start;
 static unsigned long mc_groups_longs = 1;
 
+/* We need the last attribute with a non-zero ID, therefore a 2-entry array */
+static struct nla_policy genl_policy_reject_all[] = {
+       { .type = NLA_REJECT },
+       { .type = NLA_REJECT },
+};
+
 static int genl_ctrl_event(int event, const struct genl_family *family,
                           const struct genl_multicast_group *grp,
                           int grp_id);
 
+static void
+genl_op_fill_in_reject_policy(const struct genl_family *family,
+                             struct genl_ops *op)
+{
+       BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
+
+       if (op->policy || op->cmd < family->resv_start_op)
+               return;
+
+       op->policy = genl_policy_reject_all;
+       op->maxattr = 1;
+}
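
The practical effect for family authors can be sketched with a hypothetical family (the demo_* names below are illustrative, not part of this patch): any op whose cmd is at or above resv_start_op and which declares no .policy of its own now inherits genl_policy_reject_all, so attributes sent to that command fail validation instead of being silently accepted.

    #include <linux/module.h>
    #include <net/genetlink.h>

    enum {
            DEMO_CMD_UNSPEC,
            DEMO_CMD_NEW,           /* first command in the "reserved" range */
    };

    static int demo_doit(struct sk_buff *skb, struct genl_info *info)
    {
            return 0;               /* no attributes can reach here undeclared */
    }

    static const struct genl_ops demo_ops[] = {
            {
                    .cmd  = DEMO_CMD_NEW,   /* >= resv_start_op, no .policy set */
                    .doit = demo_doit,      /* reject-all policy is filled in */
            },
    };

    static struct genl_family demo_family __ro_after_init = {
            .name          = "demo",
            .version       = 1,
            .ops           = demo_ops,
            .n_ops         = ARRAY_SIZE(demo_ops),
            .resv_start_op = DEMO_CMD_NEW,
            .module        = THIS_MODULE,
    };

Such a family would still be registered with genl_register_family() as usual; only the validation behaviour of the undeclared-policy ops changes.
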
+
 static const struct genl_family *genl_family_find_byid(unsigned int id)
 {
        return idr_find(&genl_fam_idr, id);
@@ -113,6 +132,8 @@ static void genl_op_from_full(const struct genl_family *family,
                op->maxattr = family->maxattr;
        if (!op->policy)
                op->policy = family->policy;
+
+       genl_op_fill_in_reject_policy(family, op);
 }
 
 static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
@@ -142,6 +163,8 @@ static void genl_op_from_small(const struct genl_family *family,
 
        op->maxattr = family->maxattr;
        op->policy = family->policy;
+
+       genl_op_fill_in_reject_policy(family, op);
 }
 
 static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
@@ -357,6 +380,8 @@ static int genl_validate_ops(const struct genl_family *family)
                genl_get_cmd_by_index(i, family, &op);
                if (op.dumpit == NULL && op.doit == NULL)
                        return -EINVAL;
+               if (WARN_ON(op.cmd >= family->resv_start_op && op.validate))
+                       return -EINVAL;
                for (j = i + 1; j < genl_get_cmd_cnt(family); j++) {
                        struct genl_ops op2;
 
index c8a9075..155263e 100644 (file)
@@ -1616,7 +1616,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb,
        if (IS_ERR(dp))
                return;
 
-       WARN(dp->user_features, "Dropping previously announced user features\n");
+       pr_warn("%s: Dropping previously announced user features\n",
+               ovs_dp_name(dp));
        dp->user_features = 0;
 }
 
index c98af0a..4a27dfb 100644 (file)
@@ -1099,12 +1099,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 skip:
                if (!ingress) {
-                       notify_and_destroy(net, skb, n, classid,
-                                          rtnl_dereference(dev->qdisc), new);
+                       old = rtnl_dereference(dev->qdisc);
                        if (new && !new->ops->attach)
                                qdisc_refcount_inc(new);
                        rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
+                       notify_and_destroy(net, skb, n, classid, old, new);
+
                        if (new && new->ops->attach)
                                new->ops->attach(new);
                } else {
index 817cd06..3ed0c33 100644 (file)
@@ -2224,8 +2224,12 @@ retry:
 
 static void cake_reset(struct Qdisc *sch)
 {
+       struct cake_sched_data *q = qdisc_priv(sch);
        u32 c;
 
+       if (!q->tins)
+               return;
+
        for (c = 0; c < CAKE_MAX_TINS; c++)
                cake_clear_tin(sch, c);
 }
index 99d318b..8c4fee0 100644 (file)
@@ -478,24 +478,26 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
-                       return err;
+                       goto init_failure;
        }
 
        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
-               return err;
+               goto init_failure;
 
        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
-               if (!q->flows)
-                       return -ENOMEM;
-
+               if (!q->flows) {
+                       err = -ENOMEM;
+                       goto init_failure;
+               }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
-               if (!q->backlogs)
-                       return -ENOMEM;
-
+               if (!q->backlogs) {
+                       err = -ENOMEM;
+                       goto alloc_failure;
+               }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;
 
@@ -508,6 +510,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
+
+alloc_failure:
+       kvfree(q->flows);
+       q->flows = NULL;
+init_failure:
+       q->flows_cnt = 0;
+       return err;
 }
 
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
index 0366a1a..1871a1c 100644 (file)
@@ -455,7 +455,8 @@ static void sfb_reset(struct Qdisc *sch)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
 
-       qdisc_reset(q->qdisc);
+       if (likely(q->qdisc))
+               qdisc_reset(q->qdisc);
        q->slot = 0;
        q->double_buffering = false;
        sfb_zero_all_buckets(q);
index e6ee797..c305d8d 100644 (file)
@@ -896,7 +896,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
                }
                memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
                       SMC_MAX_PNETID_LEN);
-               if (smc_wr_alloc_lgr_mem(lgr))
+               rc = smc_wr_alloc_lgr_mem(lgr);
+               if (rc)
                        goto free_wq;
                smc_llc_lgr_init(lgr, smc);
 
index da69e1a..e863070 100644 (file)
@@ -148,8 +148,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 {
        struct net *net = d->net;
        struct tipc_net *tn = tipc_net(net);
-       bool trial = time_before(jiffies, tn->addr_trial_end);
        u32 self = tipc_own_addr(net);
+       bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
 
        if (mtyp == DSC_TRIAL_FAIL_MSG) {
                if (!trial)
index 5522865..d92ec92 100644 (file)
@@ -450,12 +450,19 @@ static void tipc_conn_data_ready(struct sock *sk)
 static void tipc_topsrv_accept(struct work_struct *work)
 {
        struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
-       struct socket *lsock = srv->listener;
-       struct socket *newsock;
+       struct socket *newsock, *lsock;
        struct tipc_conn *con;
        struct sock *newsk;
        int ret;
 
+       spin_lock_bh(&srv->idr_lock);
+       if (!srv->listener) {
+               spin_unlock_bh(&srv->idr_lock);
+               return;
+       }
+       lsock = srv->listener;
+       spin_unlock_bh(&srv->idr_lock);
+
        while (1) {
                ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
                if (ret < 0)
@@ -489,7 +496,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk)
 
        read_lock_bh(&sk->sk_callback_lock);
        srv = sk->sk_user_data;
-       if (srv->listener)
+       if (srv)
                queue_work(srv->rcv_wq, &srv->awork);
        read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -568,7 +575,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
        sub.seq.upper = upper;
        sub.timeout = TIPC_WAIT_FOREVER;
        sub.filter = filter;
-       *(u32 *)&sub.usr_handle = port;
+       *(u64 *)&sub.usr_handle = (u64)port;
 
        con = tipc_conn_alloc(tipc_topsrv(net));
        if (IS_ERR(con))
@@ -699,8 +706,9 @@ static void tipc_topsrv_stop(struct net *net)
        __module_get(lsock->sk->sk_prot_creator->owner);
        srv->listener = NULL;
        spin_unlock_bh(&srv->idr_lock);
-       sock_release(lsock);
+
        tipc_topsrv_work_stop(srv);
+       sock_release(lsock);
        idr_destroy(&srv->conn_idr);
        kfree(srv);
 }
index 9b79e33..955ac3e 100644 (file)
@@ -273,7 +273,7 @@ static int tls_strp_read_copyin(struct tls_strparser *strp)
        return desc.error;
 }
 
-static int tls_strp_read_short(struct tls_strparser *strp)
+static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
 {
        struct skb_shared_info *shinfo;
        struct page *page;
@@ -283,7 +283,7 @@ static int tls_strp_read_short(struct tls_strparser *strp)
         * to read the data out. Otherwise the connection will stall.
         * Without pressure threshold of INT_MAX will never be ready.
         */
-       if (likely(!tcp_epollin_ready(strp->sk, INT_MAX)))
+       if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
                return 0;
 
        shinfo = skb_shinfo(strp->anchor);
@@ -315,6 +315,27 @@ static int tls_strp_read_short(struct tls_strparser *strp)
        return 0;
 }
 
+static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+{
+       unsigned int len = strp->stm.offset + strp->stm.full_len;
+       struct sk_buff *skb;
+       u32 seq;
+
+       skb = skb_shinfo(strp->anchor)->frag_list;
+       seq = TCP_SKB_CB(skb)->seq;
+
+       while (skb->len < len) {
+               seq += skb->len;
+               len -= skb->len;
+               skb = skb->next;
+
+               if (TCP_SKB_CB(skb)->seq != seq)
+                       return false;
+       }
+
+       return true;
+}
+
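
The check can be modeled outside the kernel with a hypothetical seg type standing in for the skbs on the anchor's frag_list and for TCP_SKB_CB(skb)->seq; the walk succeeds only when each segment starts exactly where the previous one ended, i.e. the queued bytes covering the record contain no retransmitted duplicates or gaps.

    #include <stdbool.h>

    /* Hypothetical stand-in for one skb on the anchor's frag_list. */
    struct seg {
            unsigned int seq;       /* sequence number of the first byte */
            unsigned int len;       /* payload length */
            struct seg *next;
    };

    /* Same walk as tls_strp_check_no_dup(): cover 'need' bytes, failing on
     * any segment that does not start where the previous one ended.
     */
    static bool segs_contiguous(const struct seg *s, unsigned int need)
    {
            unsigned int seq = s->seq;

            while (s->len < need) {
                    seq  += s->len;
                    need -= s->len;
                    s = s->next;
                    if (s->seq != seq)
                            return false;   /* overlap (duplicate) or gap */
            }
            return true;
    }
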
 static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
 {
        struct tcp_sock *tp = tcp_sk(strp->sk);
@@ -373,7 +394,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
                return tls_strp_read_copyin(strp);
 
        if (inq < strp->stm.full_len)
-               return tls_strp_read_short(strp);
+               return tls_strp_read_copy(strp, true);
 
        if (!strp->stm.full_len) {
                tls_strp_load_anchor_with_queue(strp, inq);
@@ -387,9 +408,12 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
                strp->stm.full_len = sz;
 
                if (!strp->stm.full_len || inq < strp->stm.full_len)
-                       return tls_strp_read_short(strp);
+                       return tls_strp_read_copy(strp, true);
        }
 
+       if (!tls_strp_check_no_dup(strp))
+               return tls_strp_read_copy(strp, false);
+
        strp->msg_ready = 1;
        tls_rx_msg_ready(strp);
 
index 5fc8986..bc751fa 100644 (file)
@@ -401,8 +401,10 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
                                      &tmpbuf, size, GFP_NOFS);
        dput(dentry);
 
-       if (ret < 0 || !tmpbuf)
-               return ret;
+       if (ret < 0 || !tmpbuf) {
+               size = ret;
+               goto out_free;
+       }
 
        fs_ns = inode->i_sb->s_user_ns;
        cap = (struct vfs_cap_data *) tmpbuf;
index fe5fcf5..64a6a37 100644 (file)
@@ -2022,7 +2022,8 @@ static inline int convert_context_handle_invalid_context(
  * in `newc'.  Verify that the context is valid
  * under the new policy.
  */
-static int convert_context(struct context *oldc, struct context *newc, void *p)
+static int convert_context(struct context *oldc, struct context *newc, void *p,
+                          gfp_t gfp_flags)
 {
        struct convert_context_args *args;
        struct ocontext *oc;
@@ -2036,7 +2037,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
        args = p;
 
        if (oldc->str) {
-               s = kstrdup(oldc->str, GFP_KERNEL);
+               s = kstrdup(oldc->str, gfp_flags);
                if (!s)
                        return -ENOMEM;
 
index a54b865..db5cce3 100644 (file)
@@ -325,7 +325,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
                }
 
                rc = convert->func(context, &dst_convert->context,
-                                  convert->args);
+                                  convert->args, GFP_ATOMIC);
                if (rc) {
                        context_destroy(&dst->context);
                        goto out_unlock;
@@ -404,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst,
                while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
                        rc = convert->func(&esrc->ptr_leaf->entries[i].context,
                                           &edst->ptr_leaf->entries[i].context,
-                                          convert->args);
+                                          convert->args, GFP_KERNEL);
                        if (rc)
                                return rc;
                        (*pos)++;
index 4eff0e4..9fce0d5 100644 (file)
@@ -65,7 +65,7 @@ struct sidtab_isid_entry {
 };
 
 struct sidtab_convert_params {
-       int (*func)(struct context *oldc, struct context *newc, void *args);
+       int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags);
        void *args;
        struct sidtab *target;
 };
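
A minimal sketch of a conversion callback under the new signature (demo name, assuming the same str/len fields that convert_context() touches): the callback must allocate with the gfp_flags it is handed, because sidtab_context_to_sid() now invokes it under a spinlock with GFP_ATOMIC while the tree conversion path still passes GFP_KERNEL.

    /* Builds inside security/selinux/ss/, which provides struct context. */
    #include <linux/slab.h>
    #include "context.h"

    static int demo_convert(struct context *oldc, struct context *newc,
                            void *args, gfp_t gfp_flags)
    {
            if (oldc->str) {
                    /* never hardcode GFP_KERNEL here; honor the caller */
                    newc->str = kstrdup(oldc->str, gfp_flags);
                    if (!newc->str)
                            return -ENOMEM;
                    newc->len = oldc->len;
            }
            return 0;
    }
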
index faf6b03..51ed2f3 100644 (file)
@@ -147,6 +147,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
        return rc;
 }
 
+/* Returns 1 if added, 0 otherwise; don't return a negative value! */
 /* FIXME: look at device node refcounting */
 static int i2sbus_add_dev(struct macio_dev *macio,
                          struct i2sbus_control *control,
@@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
         * either as the second one in that case is just a modem. */
        if (!ok) {
                kfree(dev);
-               return -ENODEV;
+               return 0;
        }
 
        mutex_init(&dev->lock);
@@ -302,6 +303,10 @@ static int i2sbus_add_dev(struct macio_dev *macio,
 
        if (soundbus_add_one(&dev->sound)) {
                printk(KERN_DEBUG "i2sbus: device registration error!\n");
+               if (dev->sound.ofdev.dev.kobj.state_initialized) {
+                       soundbus_dev_put(&dev->sound);
+                       return 0;
+               }
                goto err;
        }
 
index a727192..50e7ba6 100644 (file)
@@ -753,6 +753,29 @@ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
 }
 EXPORT_SYMBOL(snd_ctl_rename_id);
 
+/**
+ * snd_ctl_rename - rename the control on the card
+ * @card: the card instance
+ * @kctl: the control to rename
+ * @name: the new name
+ *
+ * Renames the specified control on the card to the new name.
+ *
+ * Make sure to take the control write lock - down_write(&card->controls_rwsem).
+ */
+void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl,
+                   const char *name)
+{
+       remove_hash_entries(card, kctl);
+
+       if (strscpy(kctl->id.name, name, sizeof(kctl->id.name)) < 0)
+               pr_warn("ALSA: Renamed control new name '%s' truncated to '%s'\n",
+                       name, kctl->id.name);
+
+       add_hash_entries(card, kctl);
+}
+EXPORT_SYMBOL(snd_ctl_rename);
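
A minimal caller sketch, assuming a by-name lookup via snd_ctl_find_id() on the mixer interface (the demo_* wrapper is illustrative, not from this patch); per the kernel-doc above, the control write lock is held around both the lookup and the rename:

    #include <sound/core.h>
    #include <sound/control.h>

    static int demo_rename_mixer_ctl(struct snd_card *card,
                                     const char *src, const char *dst)
    {
            struct snd_ctl_elem_id id = {};
            struct snd_kcontrol *kctl;
            int err = -ENOENT;

            id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
            strscpy(id.name, src, sizeof(id.name));

            down_write(&card->controls_rwsem);
            kctl = snd_ctl_find_id(card, &id);  /* requires controls_rwsem held */
            if (kctl) {
                    snd_ctl_rename(card, kctl, dst);
                    err = 0;
            }
            up_write(&card->controls_rwsem);

            return err;
    }
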
+
 #ifndef CONFIG_SND_CTL_FAST_LOOKUP
 static struct snd_kcontrol *
 snd_ctl_find_numid_slow(struct snd_card *card, unsigned int numid)
index cb60a07..ff68532 100644 (file)
@@ -2009,6 +2009,7 @@ static int snd_ac97_dev_register(struct snd_device *device)
        err = device_register(&ac97->dev);
        if (err < 0) {
                ac97_err(ac97, "Can't register ac97 bus\n");
+               put_device(&ac97->dev);
                ac97->dev.bus = NULL;
                return err;
        }
@@ -2655,11 +2656,18 @@ EXPORT_SYMBOL(snd_ac97_resume);
  */
 static void set_ctl_name(char *dst, const char *src, const char *suffix)
 {
-       if (suffix)
-               sprintf(dst, "%s %s", src, suffix);
-       else
-               strcpy(dst, src);
-}      
+       const size_t msize = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
+
+       if (suffix) {
+               if (snprintf(dst, msize, "%s %s", src, suffix) >= msize)
+                       pr_warn("ALSA: AC97 control name '%s %s' truncated to '%s'\n",
+                               src, suffix, dst);
+       } else {
+               if (strscpy(dst, src, msize) < 0)
+                       pr_warn("ALSA: AC97 control name '%s' truncated to '%s'\n",
+                               src, dst);
+       }
+}
 
 /* remove the control with the given name and optional suffix */
 static int snd_ac97_remove_ctl(struct snd_ac97 *ac97, const char *name,
@@ -2686,8 +2694,11 @@ static int snd_ac97_rename_ctl(struct snd_ac97 *ac97, const char *src,
                               const char *dst, const char *suffix)
 {
        struct snd_kcontrol *kctl = ctl_find(ac97, src, suffix);
+       char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
        if (kctl) {
-               set_ctl_name(kctl->id.name, dst, suffix);
+               set_ctl_name(name, dst, suffix);
+               snd_ctl_rename(ac97->bus->card, kctl, name);
                return 0;
        }
        return -ENOENT;
@@ -2706,11 +2717,17 @@ static int snd_ac97_swap_ctl(struct snd_ac97 *ac97, const char *s1,
                             const char *s2, const char *suffix)
 {
        struct snd_kcontrol *kctl1, *kctl2;
+       char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
        kctl1 = ctl_find(ac97, s1, suffix);
        kctl2 = ctl_find(ac97, s2, suffix);
        if (kctl1 && kctl2) {
-               set_ctl_name(kctl1->id.name, s2, suffix);
-               set_ctl_name(kctl2->id.name, s1, suffix);
+               set_ctl_name(name, s2, suffix);
+               snd_ctl_rename(ac97->bus->card, kctl1, name);
+
+               set_ctl_name(name, s1, suffix);
+               snd_ctl_rename(ac97->bus->card, kctl2, name);
+
                return 0;
        }
        return -ENOENT;
index 0aa7af0..6cbb2bc 100644 (file)
@@ -141,7 +141,7 @@ struct snd_vortex {
 #ifndef CHIP_AU8810
        stream_t dma_wt[NR_WT];
        wt_voice_t wt_voice[NR_WT];     /* WT register cache. */
-       char mixwt[(NR_WT / NR_WTPB) * 6];      /* WT mixin objects */
+       s8 mixwt[(NR_WT / NR_WTPB) * 6];        /* WT mixin objects */
 #endif
 
        /* Global resources */
@@ -235,8 +235,8 @@ static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v);
 static void vortex_connect_default(vortex_t * vortex, int en);
 static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch,
                                 int dir, int type, int subdev);
-static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
-                                 int restype);
+static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
+                                int restype);
 #ifndef CHIP_AU8810
 static int vortex_wt_allocroute(vortex_t * vortex, int dma, int nr_ch);
 static void vortex_wt_connect(vortex_t * vortex, int en);
index 2ed5100..f217c02 100644 (file)
@@ -1998,7 +1998,7 @@ static const int resnum[VORTEX_RESOURCE_LAST] =
  out: Mean checkout if != 0. Else mean Checkin resource.
  restype: Indicates type of resource to be checked in or out.
 */
-static char
+static int
 vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
 {
        int i, qty = resnum[restype], resinuse = 0;
index 05f5601..f6381c0 100644 (file)
@@ -720,7 +720,7 @@ static int rename_ctl(struct snd_card *card, const char *src, const char *dst)
 {
        struct snd_kcontrol *kctl = ctl_find(card, src);
        if (kctl) {
-               strcpy(kctl->id.name, dst);
+               snd_ctl_rename(card, kctl, dst);
                return 0;
        }
        return -ENOENT;
index e9c0fe3..3c115f8 100644 (file)
@@ -1767,7 +1767,7 @@ static int rename_ctl(struct snd_card *card, const char *src, const char *dst)
 {
        struct snd_kcontrol *kctl = ctl_find(card, src);
        if (kctl) {
-               strcpy(kctl->id.name, dst);
+               snd_ctl_rename(card, kctl, dst);
                return 0;
        }
        return -ENOENT;
index e6c4bb5..701a72e 100644 (file)
@@ -2142,7 +2142,7 @@ static void rename_ctl(struct hda_codec *codec, const char *oldname,
 
        kctl = snd_hda_find_mixer_ctl(codec, oldname);
        if (kctl)
-               strcpy(kctl->id.name, newname);
+               snd_ctl_rename(codec->card, kctl, newname);
 }
 
 static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
@@ -6654,13 +6654,8 @@ static int comp_bind(struct device *dev)
 {
        struct hda_codec *cdc = dev_to_hda_codec(dev);
        struct alc_spec *spec = cdc->spec;
-       int ret;
 
-       ret = component_bind_all(dev, spec->comps);
-       if (ret)
-               return ret;
-
-       return 0;
+       return component_bind_all(dev, spec->comps);
 }
 
 static void comp_unbind(struct device *dev)
@@ -9328,6 +9323,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9346,6 +9342,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x89c0, "HP ZBook Power 15.6 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -9400,6 +9397,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+       SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
        SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
index dcc43a8..65add92 100644 (file)
@@ -433,7 +433,7 @@ struct hdsp_midi {
     struct snd_rawmidi           *rmidi;
     struct snd_rawmidi_substream *input;
     struct snd_rawmidi_substream *output;
-    char                     istimer; /* timer in use */
+    signed char                     istimer; /* timer in use */
     struct timer_list       timer;
     spinlock_t               lock;
     int                             pending;
@@ -480,7 +480,7 @@ struct hdsp {
        pid_t                 playback_pid;
        int                   running;
        int                   system_sample_rate;
-       const char           *channel_map;
+       const signed char    *channel_map;
        int                   dev;
        int                   irq;
        unsigned long         port;
@@ -502,7 +502,7 @@ struct hdsp {
    where the data for that channel can be read/written from/to.
 */
 
-static const char channel_map_df_ss[HDSP_MAX_CHANNELS] = {
+static const signed char channel_map_df_ss[HDSP_MAX_CHANNELS] = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
        18, 19, 20, 21, 22, 23, 24, 25
 };
@@ -517,7 +517,7 @@ static const char channel_map_mf_ss[HDSP_MAX_CHANNELS] = { /* Multiface */
        -1, -1, -1, -1, -1, -1, -1, -1
 };
 
-static const char channel_map_ds[HDSP_MAX_CHANNELS] = {
+static const signed char channel_map_ds[HDSP_MAX_CHANNELS] = {
        /* ADAT channels are remapped */
        1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23,
        /* channels 12 and 13 are S/PDIF */
@@ -526,7 +526,7 @@ static const char channel_map_ds[HDSP_MAX_CHANNELS] = {
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
 };
 
-static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = {
+static const signed char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = {
        /* ADAT channels */
        0, 1, 2, 3, 4, 5, 6, 7,
        /* SPDIF */
@@ -540,7 +540,7 @@ static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = {
        -1, -1
 };
 
-static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = {
+static const signed char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = {
        /* ADAT */
        1, 3, 5, 7,
        /* SPDIF */
@@ -554,7 +554,7 @@ static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = {
        -1, -1, -1, -1, -1, -1
 };
 
-static const char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = {
+static const signed char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = {
        /* ADAT is disabled in this mode */
        /* SPDIF */
        8, 9,
@@ -3939,7 +3939,7 @@ static snd_pcm_uframes_t snd_hdsp_hw_pointer(struct snd_pcm_substream *substream
        return hdsp_hw_pointer(hdsp);
 }
 
-static char *hdsp_channel_buffer_location(struct hdsp *hdsp,
+static signed char *hdsp_channel_buffer_location(struct hdsp *hdsp,
                                             int stream,
                                             int channel)
 
@@ -3964,7 +3964,7 @@ static int snd_hdsp_playback_copy(struct snd_pcm_substream *substream,
                                  void __user *src, unsigned long count)
 {
        struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES))
                return -EINVAL;
@@ -3982,7 +3982,7 @@ static int snd_hdsp_playback_copy_kernel(struct snd_pcm_substream *substream,
                                         void *src, unsigned long count)
 {
        struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel);
        if (snd_BUG_ON(!channel_buf))
@@ -3996,7 +3996,7 @@ static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream,
                                 void __user *dst, unsigned long count)
 {
        struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES))
                return -EINVAL;
@@ -4014,7 +4014,7 @@ static int snd_hdsp_capture_copy_kernel(struct snd_pcm_substream *substream,
                                        void *dst, unsigned long count)
 {
        struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel);
        if (snd_BUG_ON(!channel_buf))
@@ -4028,7 +4028,7 @@ static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream,
                               unsigned long count)
 {
        struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
        if (snd_BUG_ON(!channel_buf))
index 1d614fe..e7c320a 100644 (file)
@@ -230,7 +230,7 @@ struct snd_rme9652 {
        int last_spdif_sample_rate;     /* so that we can catch externally ... */
        int last_adat_sample_rate;      /* ... induced rate changes            */
 
-       const char *channel_map;
+       const signed char *channel_map;
 
        struct snd_card *card;
        struct snd_pcm *pcm;
@@ -247,12 +247,12 @@ struct snd_rme9652 {
    where the data for that channel can be read/written from/to.
 */
 
-static const char channel_map_9652_ss[26] = {
+static const signed char channel_map_9652_ss[26] = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
        18, 19, 20, 21, 22, 23, 24, 25
 };
 
-static const char channel_map_9636_ss[26] = {
+static const signed char channel_map_9636_ss[26] = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 
        /* channels 16 and 17 are S/PDIF */
        24, 25,
@@ -260,7 +260,7 @@ static const char channel_map_9636_ss[26] = {
        -1, -1, -1, -1, -1, -1, -1, -1
 };
 
-static const char channel_map_9652_ds[26] = {
+static const signed char channel_map_9652_ds[26] = {
        /* ADAT channels are remapped */
        1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23,
        /* channels 12 and 13 are S/PDIF */
@@ -269,7 +269,7 @@ static const char channel_map_9652_ds[26] = {
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
 };
 
-static const char channel_map_9636_ds[26] = {
+static const signed char channel_map_9636_ds[26] = {
        /* ADAT channels are remapped */
        1, 3, 5, 7, 9, 11, 13, 15,
        /* channels 8 and 9 are S/PDIF */
@@ -1819,7 +1819,7 @@ static snd_pcm_uframes_t snd_rme9652_hw_pointer(struct snd_pcm_substream *substr
        return rme9652_hw_pointer(rme9652);
 }
 
-static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652,
+static signed char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652,
                                             int stream,
                                             int channel)
 
@@ -1847,7 +1847,7 @@ static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream,
                                     void __user *src, unsigned long count)
 {
        struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES))
                return -EINVAL;
@@ -1867,7 +1867,7 @@ static int snd_rme9652_playback_copy_kernel(struct snd_pcm_substream *substream,
                                            void *src, unsigned long count)
 {
        struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = rme9652_channel_buffer_location(rme9652,
                                                      substream->pstr->stream,
@@ -1883,7 +1883,7 @@ static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream,
                                    void __user *dst, unsigned long count)
 {
        struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES))
                return -EINVAL;
@@ -1903,7 +1903,7 @@ static int snd_rme9652_capture_copy_kernel(struct snd_pcm_substream *substream,
                                           void *dst, unsigned long count)
 {
        struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = rme9652_channel_buffer_location(rme9652,
                                                      substream->pstr->stream,
@@ -1919,7 +1919,7 @@ static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream,
                                  unsigned long count)
 {
        struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
-       char *channel_buf;
+       signed char *channel_buf;
 
        channel_buf = rme9652_channel_buffer_location (rme9652,
                                                       substream->pstr->stream,
index 2cb50d5..6c0f1de 100644 (file)
@@ -49,6 +49,27 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                .driver_data = &acp6x_card,
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21D0"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21D0"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21D1"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "21D2"),
                }
        },
index e3b90c4..7022e62 100644 (file)
@@ -1629,6 +1629,7 @@ config SND_SOC_TFA989X
 config SND_SOC_TLV320ADC3XXX
        tristate "Texas Instruments TLV320ADC3001/3101 audio ADC"
        depends on I2C
+       depends on GPIOLIB
        help
         Enable support for Texas Instruments TLV320ADC3001 and TLV320ADC3101
         ADCs.
index ebdd567..09e3a92 100644 (file)
 #define CX2072X_PLBK_DRC_PARM_LEN      9
 #define CX2072X_CLASSD_AMP_LEN         6
 
-/* DAI interfae type */
+/* DAI interface type */
 #define CX2072X_DAI_HIFI       1
 #define CX2072X_DAI_DSP                2
 #define CX2072X_DAI_DSP_PWM    3 /* 4 ch, including mic and AEC */
index 5201a8f..71ea576 100644 (file)
@@ -136,14 +136,17 @@ enum {
 #define REG_CGR3_GO1L_OFFSET           0
 #define REG_CGR3_GO1L_MASK             (0x1f << REG_CGR3_GO1L_OFFSET)
 
+#define REG_CGR10_GIL_OFFSET           0
+#define REG_CGR10_GIR_OFFSET           4
+
 struct jz_icdc {
        struct regmap *regmap;
        void __iomem *base;
        struct clk *clk;
 };
 
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_dac_tlv, -2250, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_line_tlv, -1500, 600);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_adc_tlv,     0, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_dac_tlv, -2250, 150, 0);
 
 static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
        SOC_DOUBLE_TLV("Master Playback Volume",
@@ -151,11 +154,11 @@ static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
                       REG_CGR1_GODL_OFFSET,
                       REG_CGR1_GODR_OFFSET,
                       0xf, 1, jz4725b_dac_tlv),
-       SOC_DOUBLE_R_TLV("Master Capture Volume",
-                        JZ4725B_CODEC_REG_CGR3,
-                        JZ4725B_CODEC_REG_CGR2,
-                        REG_CGR2_GO1R_OFFSET,
-                        0x1f, 1, jz4725b_line_tlv),
+       SOC_DOUBLE_TLV("Master Capture Volume",
+                      JZ4725B_CODEC_REG_CGR10,
+                      REG_CGR10_GIL_OFFSET,
+                      REG_CGR10_GIR_OFFSET,
+                      0xf, 0, jz4725b_adc_tlv),
 
        SOC_SINGLE("Master Playback Switch", JZ4725B_CODEC_REG_CR1,
                   REG_CR1_DAC_MUTE_OFFSET, 1, 1),
@@ -180,7 +183,7 @@ static SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum,
                                  jz4725b_codec_adc_src_texts,
                                  jz4725b_codec_adc_src_values);
 static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl =
-                       SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum);
+       SOC_DAPM_ENUM("ADC Source Capture Route", jz4725b_codec_adc_src_enum);
 
 static const struct snd_kcontrol_new jz4725b_codec_mixer_controls[] = {
        SOC_DAPM_SINGLE("Line In Bypass", JZ4725B_CODEC_REG_CR1,
@@ -225,7 +228,7 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
        SND_SOC_DAPM_ADC("ADC", "Capture",
                         JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_ADC_OFFSET, 1),
 
-       SND_SOC_DAPM_MUX("ADC Source", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_MUX("ADC Source Capture Route", SND_SOC_NOPM, 0, 0,
                         &jz4725b_codec_adc_src_ctrl),
 
        /* Mixer */
@@ -236,7 +239,8 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
        SND_SOC_DAPM_MIXER("DAC to Mixer", JZ4725B_CODEC_REG_CR1,
                           REG_CR1_DACSEL_OFFSET, 0, NULL, 0),
 
-       SND_SOC_DAPM_MIXER("Line In", SND_SOC_NOPM, 0, 0, NULL, 0),
+       SND_SOC_DAPM_MIXER("Line In", JZ4725B_CODEC_REG_PMR1,
+                          REG_PMR1_SB_LIN_OFFSET, 1, NULL, 0),
        SND_SOC_DAPM_MIXER("HP Out", JZ4725B_CODEC_REG_CR1,
                           REG_CR1_HP_DIS_OFFSET, 1, NULL, 0),
 
@@ -283,11 +287,11 @@ static const struct snd_soc_dapm_route jz4725b_codec_dapm_routes[] = {
        {"Mixer", NULL, "DAC to Mixer"},
 
        {"Mixer to ADC", NULL, "Mixer"},
-       {"ADC Source", "Mixer", "Mixer to ADC"},
-       {"ADC Source", "Line In", "Line In"},
-       {"ADC Source", "Mic 1", "Mic 1"},
-       {"ADC Source", "Mic 2", "Mic 2"},
-       {"ADC", NULL, "ADC Source"},
+       {"ADC Source Capture Route", "Mixer", "Mixer to ADC"},
+       {"ADC Source Capture Route", "Line In", "Line In"},
+       {"ADC Source Capture Route", "Mic 1", "Mic 1"},
+       {"ADC Source Capture Route", "Mic 2", "Mic 2"},
+       {"ADC", NULL, "ADC Source Capture Route"},
 
        {"Out Stage", NULL, "Mixer"},
        {"HP Out", NULL, "Out Stage"},
index 554c33e..cc2df5f 100644 (file)
@@ -503,14 +503,14 @@ static int mt6660_i2c_probe(struct i2c_client *client)
                dev_err(chip->dev, "read chip revision fail\n");
                goto probe_fail;
        }
+       pm_runtime_set_active(chip->dev);
+       pm_runtime_enable(chip->dev);
 
        ret = devm_snd_soc_register_component(chip->dev,
                                               &mt6660_component_driver,
                                               &mt6660_codec_dai, 1);
-       if (!ret) {
-               pm_runtime_set_active(chip->dev);
-               pm_runtime_enable(chip->dev);
-       }
+       if (ret)
+               pm_runtime_disable(chip->dev);
 
        return ret;
 
index b66bfec..49f527c 100644 (file)
@@ -391,18 +391,18 @@ static int rt1019_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
                        unsigned int rx_mask, int slots, int slot_width)
 {
        struct snd_soc_component *component = dai->component;
-       unsigned int val = 0, rx_slotnum;
+       unsigned int cn = 0, cl = 0, rx_slotnum;
        int ret = 0, first_bit;
 
        switch (slots) {
        case 4:
-               val |= RT1019_I2S_TX_4CH;
+               cn = RT1019_I2S_TX_4CH;
                break;
        case 6:
-               val |= RT1019_I2S_TX_6CH;
+               cn = RT1019_I2S_TX_6CH;
                break;
        case 8:
-               val |= RT1019_I2S_TX_8CH;
+               cn = RT1019_I2S_TX_8CH;
                break;
        case 2:
                break;
@@ -412,16 +412,16 @@ static int rt1019_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 
        switch (slot_width) {
        case 20:
-               val |= RT1019_I2S_DL_20;
+               cl = RT1019_TDM_CL_20;
                break;
        case 24:
-               val |= RT1019_I2S_DL_24;
+               cl = RT1019_TDM_CL_24;
                break;
        case 32:
-               val |= RT1019_I2S_DL_32;
+               cl = RT1019_TDM_CL_32;
                break;
        case 8:
-               val |= RT1019_I2S_DL_8;
+               cl = RT1019_TDM_CL_8;
                break;
        case 16:
                break;
@@ -470,8 +470,10 @@ static int rt1019_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
                goto _set_tdm_err_;
        }
 
+       snd_soc_component_update_bits(component, RT1019_TDM_1,
+               RT1019_TDM_CL_MASK, cl);
        snd_soc_component_update_bits(component, RT1019_TDM_2,
-               RT1019_I2S_CH_TX_MASK | RT1019_I2S_DF_MASK, val);
+               RT1019_I2S_CH_TX_MASK, cn);
 
 _set_tdm_err_:
        return ret;
index 64df831..48ba15e 100644 (file)
 #define RT1019_TDM_BCLK_MASK           (0x1 << 6)
 #define RT1019_TDM_BCLK_NORM           (0x0 << 6)
 #define RT1019_TDM_BCLK_INV                    (0x1 << 6)
+#define RT1019_TDM_CL_MASK                     (0x7)
+#define RT1019_TDM_CL_8                                (0x4)
+#define RT1019_TDM_CL_32                       (0x3)
+#define RT1019_TDM_CL_24                       (0x2)
+#define RT1019_TDM_CL_20                       (0x1)
+#define RT1019_TDM_CL_16                       (0x0)
 
 /* 0x0401 TDM Control-2 */
 #define RT1019_I2S_CH_TX_MASK          (0x3 << 6)
index 5c29416..f99aed3 100644 (file)
@@ -50,6 +50,7 @@ static bool rt1308_volatile_register(struct device *dev, unsigned int reg)
        case 0x3008:
        case 0x300a:
        case 0xc000:
+       case 0xc710:
        case 0xc860 ... 0xc863:
        case 0xc870 ... 0xc873:
                return true;
@@ -200,6 +201,7 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
 {
        struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
        int ret = 0;
+       unsigned int tmp;
 
        if (rt1308->hw_init)
                return 0;
@@ -231,6 +233,10 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
        /* sw reset */
        regmap_write(rt1308->regmap, RT1308_SDW_RESET, 0);
 
+       regmap_read(rt1308->regmap, 0xc710, &tmp);
+       rt1308->hw_ver = tmp;
+       dev_dbg(dev, "%s, hw_ver=0x%x\n", __func__, rt1308->hw_ver);
+
        /* initial settings */
        regmap_write(rt1308->regmap, 0xc103, 0xc0);
        regmap_write(rt1308->regmap, 0xc030, 0x17);
@@ -246,8 +252,14 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
        regmap_write(rt1308->regmap, 0xc062, 0x05);
        regmap_write(rt1308->regmap, 0xc171, 0x07);
        regmap_write(rt1308->regmap, 0xc173, 0x0d);
-       regmap_write(rt1308->regmap, 0xc311, 0x7f);
-       regmap_write(rt1308->regmap, 0xc900, 0x90);
+       if (rt1308->hw_ver == RT1308_VER_C) {
+               regmap_write(rt1308->regmap, 0xc311, 0x7f);
+               regmap_write(rt1308->regmap, 0xc300, 0x09);
+       } else {
+               regmap_write(rt1308->regmap, 0xc311, 0x4f);
+               regmap_write(rt1308->regmap, 0xc300, 0x0b);
+       }
+       regmap_write(rt1308->regmap, 0xc900, 0x5a);
        regmap_write(rt1308->regmap, 0xc1a0, 0x84);
        regmap_write(rt1308->regmap, 0xc1a1, 0x01);
        regmap_write(rt1308->regmap, 0xc360, 0x78);
@@ -257,7 +269,6 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
        regmap_write(rt1308->regmap, 0xc070, 0x00);
        regmap_write(rt1308->regmap, 0xc100, 0xd7);
        regmap_write(rt1308->regmap, 0xc101, 0xd7);
-       regmap_write(rt1308->regmap, 0xc300, 0x09);
 
        if (rt1308->first_hw_init) {
                regcache_cache_bypass(rt1308->regmap, false);
index 6668e19..62ce277 100644 (file)
@@ -139,10 +139,12 @@ static const struct reg_default rt1308_reg_defaults[] = {
        { 0x3005, 0x23 },
        { 0x3008, 0x02 },
        { 0x300a, 0x00 },
+       { 0xc000 | (RT1308_DATA_PATH << 4), 0x00 },
        { 0xc003 | (RT1308_DAC_SET << 4), 0x00 },
        { 0xc000 | (RT1308_POWER << 4), 0x00 },
        { 0xc001 | (RT1308_POWER << 4), 0x00 },
        { 0xc002 | (RT1308_POWER << 4), 0x00 },
+       { 0xc000 | (RT1308_POWER_STATUS << 4), 0x00 },
 };
 
 #define RT1308_SDW_OFFSET 0xc000
@@ -163,6 +165,7 @@ struct rt1308_sdw_priv {
        bool first_hw_init;
        int rx_mask;
        int slots;
+       int hw_ver;
 };
 
 struct sdw_stream_data {
index ff7c423..d3a0f91 100644 (file)
@@ -286,4 +286,9 @@ enum {
        RT1308_AIFS
 };
 
+enum rt1308_hw_ver {
+       RT1308_VER_C = 2,
+       RT1308_VER_D
+};
+
 #endif         /* end of _RT1308_H_ */
index 466a37f..80c673a 100644 (file)
@@ -1981,7 +1981,7 @@ static int rt5682s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
                unsigned int rx_mask, int slots, int slot_width)
 {
        struct snd_soc_component *component = dai->component;
-       unsigned int cl, val = 0;
+       unsigned int cl, val = 0, tx_slotnum;
 
        if (tx_mask || rx_mask)
                snd_soc_component_update_bits(component,
@@ -1990,6 +1990,16 @@ static int rt5682s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
                snd_soc_component_update_bits(component,
                        RT5682S_TDM_ADDA_CTRL_2, RT5682S_TDM_EN, 0);
 
+       /* Tx slot configuration */
+       tx_slotnum = hweight_long(tx_mask);
+       if (tx_slotnum) {
+               if (tx_slotnum > slots) {
+                       dev_err(component->dev, "Invalid or oversized Tx slots.\n");
+                       return -EINVAL;
+               }
+               val |= (tx_slotnum - 1) << RT5682S_TDM_ADC_DL_SFT;
+       }
+
        switch (slots) {
        case 4:
                val |= RT5682S_TDM_TX_CH_4;
@@ -2010,7 +2020,8 @@ static int rt5682s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
        }
 
        snd_soc_component_update_bits(component, RT5682S_TDM_CTRL,
-               RT5682S_TDM_TX_CH_MASK | RT5682S_TDM_RX_CH_MASK, val);
+               RT5682S_TDM_TX_CH_MASK | RT5682S_TDM_RX_CH_MASK |
+               RT5682S_TDM_ADC_DL_MASK, val);
 
        switch (slot_width) {
        case 8:
index 824dc65..45464a0 100644 (file)
 #define RT5682S_TDM_RX_CH_8                    (0x3 << 8)
 #define RT5682S_TDM_ADC_LCA_MASK               (0x7 << 4)
 #define RT5682S_TDM_ADC_LCA_SFT                        4
+#define RT5682S_TDM_ADC_DL_MASK                        (0x3 << 0)
 #define RT5682S_TDM_ADC_DL_SFT                 0
 
 /* TDM control 2 (0x007a) */
index baab320..a969547 100644 (file)
@@ -1449,7 +1449,7 @@ static struct i2c_driver adc3xxx_i2c_driver = {
                   .of_match_table = tlv320adc3xxx_of_match,
                  },
        .probe_new = adc3xxx_i2c_probe,
-       .remove = adc3xxx_i2c_remove,
+       .remove = __exit_p(adc3xxx_i2c_remove),
        .id_table = adc3xxx_i2c_id,
 };
 
index c09c9ac..adaf886 100644 (file)
@@ -2099,6 +2099,9 @@ static int wm5102_probe(struct platform_device *pdev)
                regmap_update_bits(arizona->regmap, wm5102_digital_vu[i],
                                   WM5102_DIG_VU, WM5102_DIG_VU);
 
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
+
        ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
                                  "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
                                  wm5102);
@@ -2131,9 +2134,6 @@ static int wm5102_probe(struct platform_device *pdev)
                goto err_spk_irqs;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_idle(&pdev->dev);
-
        return ret;
 
 err_spk_irqs:
@@ -2142,6 +2142,7 @@ err_dsp_irq:
        arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
        arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
 err_jack_codec_dev:
+       pm_runtime_disable(&pdev->dev);
        arizona_jack_codec_dev_remove(&wm5102->core);
 
        return ret;
index fc634c9..e0b9716 100644 (file)
@@ -2457,6 +2457,9 @@ static int wm5110_probe(struct platform_device *pdev)
                regmap_update_bits(arizona->regmap, wm5110_digital_vu[i],
                                   WM5110_DIG_VU, WM5110_DIG_VU);
 
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
+
        ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
                                  "ADSP2 Compressed IRQ", wm5110_adsp2_irq,
                                  wm5110);
@@ -2489,9 +2492,6 @@ static int wm5110_probe(struct platform_device *pdev)
                goto err_spk_irqs;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_idle(&pdev->dev);
-
        return ret;
 
 err_spk_irqs:
@@ -2500,6 +2500,7 @@ err_dsp_irq:
        arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
        arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
 err_jack_codec_dev:
+       pm_runtime_disable(&pdev->dev);
        arizona_jack_codec_dev_remove(&wm5110->core);
 
        return ret;
index 8104966..b4b4355 100644 (file)
@@ -1840,6 +1840,49 @@ SOC_SINGLE_TLV("SPKOUTR Mixer DACR Volume", WM8962_SPEAKER_MIXER_5,
               4, 1, 0, inmix_tlv),
 };
 
+static int tp_event(struct snd_soc_dapm_widget *w,
+                   struct snd_kcontrol *kcontrol, int event)
+{
+       int ret, reg, val, mask;
+       struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+       ret = pm_runtime_resume_and_get(component->dev);
+       if (ret < 0) {
+               dev_err(component->dev, "Failed to resume device: %d\n", ret);
+               return ret;
+       }
+
+       reg = WM8962_ADDITIONAL_CONTROL_4;
+
+       if (!strcmp(w->name, "TEMP_HP")) {
+               mask = WM8962_TEMP_ENA_HP_MASK;
+               val = WM8962_TEMP_ENA_HP;
+       } else if (!strcmp(w->name, "TEMP_SPK")) {
+               mask = WM8962_TEMP_ENA_SPK_MASK;
+               val = WM8962_TEMP_ENA_SPK;
+       } else {
+               pm_runtime_put(component->dev);
+               return -EINVAL;
+       }
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMD:
+               val = 0;
+               fallthrough;
+       case SND_SOC_DAPM_POST_PMU:
+               ret = snd_soc_component_update_bits(component, reg, mask, val);
+               break;
+       default:
+               WARN(1, "Invalid event %d\n", event);
+               pm_runtime_put(component->dev);
+               return -EINVAL;
+       }
+
+       pm_runtime_put(component->dev);
+
+       return 0;
+}
+
 static int cp_event(struct snd_soc_dapm_widget *w,
                    struct snd_kcontrol *kcontrol, int event)
 {
@@ -2140,8 +2183,10 @@ SND_SOC_DAPM_SUPPLY("TOCLK", WM8962_ADDITIONAL_CONTROL_1, 0, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY_S("DSP2", 1, WM8962_DSP2_POWER_MANAGEMENT,
                      WM8962_DSP2_ENA_SHIFT, 0, dsp2_event,
                      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-SND_SOC_DAPM_SUPPLY("TEMP_HP", WM8962_ADDITIONAL_CONTROL_4, 2, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("TEMP_SPK", WM8962_ADDITIONAL_CONTROL_4, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("TEMP_HP", SND_SOC_NOPM, 0, 0, tp_event,
+               SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("TEMP_SPK", SND_SOC_NOPM, 0, 0, tp_event,
+               SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("INPGAL", WM8962_LEFT_INPUT_PGA_CONTROL, 4, 0,
                   inpgal, ARRAY_SIZE(inpgal)),
@@ -3763,6 +3808,11 @@ static int wm8962_i2c_probe(struct i2c_client *i2c)
        if (ret < 0)
                goto err_pm_runtime;
 
+       regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+                           WM8962_TEMP_ENA_HP_MASK, 0);
+       regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+                           WM8962_TEMP_ENA_SPK_MASK, 0);
+
        regcache_cache_only(wm8962->regmap, true);
 
        /* The drivers should power up as needed */
index 77136a5..c0207e9 100644 (file)
@@ -1161,6 +1161,9 @@ static int wm8997_probe(struct platform_device *pdev)
                regmap_update_bits(arizona->regmap, wm8997_digital_vu[i],
                                   WM8997_DIG_VU, WM8997_DIG_VU);
 
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
+
        arizona_init_common(arizona);
 
        ret = arizona_init_vol_limit(arizona);
@@ -1179,14 +1182,12 @@ static int wm8997_probe(struct platform_device *pdev)
                goto err_spk_irqs;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_idle(&pdev->dev);
-
        return ret;
 
 err_spk_irqs:
        arizona_free_spk_irqs(arizona);
 err_jack_codec_dev:
+       pm_runtime_disable(&pdev->dev);
        arizona_jack_codec_dev_remove(&wm8997->core);
 
        return ret;
index b327372..fe7cf97 100644 (file)
@@ -417,7 +417,7 @@ static inline bool parse_as_dpcm_link(struct asoc_simple_priv *priv,
         * or has convert-xxx property
         */
        if ((of_get_child_count(codec_port) > 1) ||
-           (adata->convert_rate || adata->convert_channels))
+           asoc_simple_is_convert_required(adata))
                return true;
 
        return false;
index bef1683..be69bbc 100644 (file)
@@ -85,6 +85,21 @@ void asoc_simple_parse_convert(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(asoc_simple_parse_convert);
 
+/**
+ * asoc_simple_is_convert_required() - Query if HW param conversion was requested
+ * @data: Link data.
+ *
+ * Returns true if any HW param conversion was requested for this DAI link with
+ * any "convert-xxx" properties.
+ */
+bool asoc_simple_is_convert_required(const struct asoc_simple_data *data)
+{
+       return data->convert_rate ||
+              data->convert_channels ||
+              data->convert_sample_format;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_is_convert_required);
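
The intended call-site pattern, sketched with a hypothetical wrapper mirroring the surrounding hunks (and assuming the matching declaration added to simple_card_utils.h elsewhere in this merge): a DAI link is treated as a DPCM link when it fans out to more than one codec endpoint or when any convert-xxx property was parsed into its asoc_simple_data.

    #include <linux/of.h>
    #include <sound/simple_card_utils.h>

    static bool demo_link_needs_dpcm(struct device_node *codec_port,
                                     const struct asoc_simple_data *adata)
    {
            /* multiple codec endpoints, or any convert-xxx property present */
            return of_get_child_count(codec_port) > 1 ||
                   asoc_simple_is_convert_required(adata);
    }
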
+
 int asoc_simple_parse_daifmt(struct device *dev,
                             struct device_node *node,
                             struct device_node *codec,
index 78419e1..feb55b6 100644 (file)
@@ -393,8 +393,7 @@ static int __simple_for_each_link(struct asoc_simple_priv *priv,
                         * or has convert-xxx property
                         */
                        if (dpcm_selectable &&
-                           (num > 2 ||
-                            adata.convert_rate || adata.convert_channels)) {
+                           (num > 2 || asoc_simple_is_convert_required(&adata))) {
                                /*
                                 * np
                                 *       |1(CPU)|0(Codec)  li->cpu
index 2d09868..2358be2 100644 (file)
@@ -223,6 +223,18 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
                                        SOF_RT5682_SSP_AMP(2) |
                                        SOF_RT5682_NUM_HDMIDEV(4)),
        },
+       {
+               .callback = sof_rt5682_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Rex"),
+               },
+               .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+                                       SOF_RT5682_SSP_CODEC(2) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_RT5682_SSP_AMP(0) |
+                                       SOF_RT5682_NUM_HDMIDEV(4)
+                                       ),
+       },
        {}
 };
 
index 2ff30b4..ee9857d 100644 (file)
@@ -202,6 +202,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                                        SOF_SDW_PCH_DMIC |
                                        RT711_JD1),
        },
+       {
+               /* NUC15 LAPBC710 skews */
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "LAPBC710"),
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       SOF_SDW_PCH_DMIC |
+                                       RT711_JD1),
+       },
        /* TigerLake-SDCA devices */
        {
                .callback = sof_sdw_quirk_cb,
index bbba2df..3312b57 100644 (file)
@@ -689,11 +689,6 @@ static void load_codec_module(struct hda_codec *codec)
 
 #endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
 
-static void skl_codec_device_exit(struct device *dev)
-{
-       snd_hdac_device_exit(dev_to_hdac_dev(dev));
-}
-
 static struct hda_codec *skl_codec_device_init(struct hdac_bus *bus, int addr)
 {
        struct hda_codec *codec;
@@ -706,12 +701,11 @@ static struct hda_codec *skl_codec_device_init(struct hdac_bus *bus, int addr)
        }
 
        codec->core.type = HDA_DEV_ASOC;
-       codec->core.dev.release = skl_codec_device_exit;
 
        ret = snd_hdac_device_register(&codec->core);
        if (ret) {
                dev_err(bus->dev, "failed to register hdac device\n");
-               snd_hdac_device_exit(&codec->core);
+               put_device(&codec->core.dev);
                return ERR_PTR(ret);
        }
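
Once registration of the hdac device has been attempted, the hunk above cleans up with put_device() instead of calling the exit helper directly, so teardown goes through the refcount and the release callback runs exactly once. A toy standalone refcount sketch of that rule; the object/release names are made up and this is not the driver-model API:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a struct device. */
struct object {
        int refcount;
        void (*release)(struct object *);
};

static void object_release(struct object *obj)
{
        puts("release: freeing object");
        free(obj);
}

static struct object *object_init(void)
{
        struct object *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return NULL;
        obj->refcount = 1;              /* the caller owns one reference */
        obj->release = object_release;
        return obj;
}

static void object_put(struct object *obj)
{
        if (--obj->refcount == 0)
                obj->release(obj);
}

static int object_register(struct object *obj)
{
        (void)obj;
        return -1;                      /* simulate a registration failure */
}

int main(void)
{
        struct object *obj = object_init();

        if (!obj)
                return EXIT_FAILURE;

        if (object_register(obj)) {
                /* Drop the reference rather than freeing directly, as the
                 * hunk above does with put_device(); release runs once. */
                object_put(obj);
                return EXIT_FAILURE;
        }

        object_put(obj);
        return EXIT_SUCCESS;
}
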
 
index d0e59e0..8c7398b 100644 (file)
@@ -187,6 +187,7 @@ config SND_SOC_SC8280XP
 config SND_SOC_SC7180
        tristate "SoC Machine driver for SC7180 boards"
        depends on I2C && GPIOLIB
+       depends on SOUNDWIRE || SOUNDWIRE=n
        select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7180
        select SND_SOC_MAX98357A
index 8a56f38..5435384 100644 (file)
@@ -782,10 +782,20 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
                return true;
        if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
                return true;
+       if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
+               return true;
+       if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
+               return true;
 
        for (i = 0; i < v->hdmi_rdma_channels; ++i) {
                if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
                        return true;
+               if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
+                       return true;
+               if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
+                       return true;
+               if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
+                       return true;
        }
        return false;
 }
index 659b9ad..e12f824 100644 (file)
@@ -1213,9 +1213,11 @@ int snd_soc_pcm_component_pm_runtime_get(struct snd_soc_pcm_runtime *rtd,
        int i;
 
        for_each_rtd_components(rtd, i, component) {
-               int ret = pm_runtime_resume_and_get(component->dev);
-               if (ret < 0 && ret != -EACCES)
+               int ret = pm_runtime_get_sync(component->dev);
+               if (ret < 0 && ret != -EACCES) {
+                       pm_runtime_put_noidle(component->dev);
                        return soc_component_ret(component, ret);
+               }
                /* mark stream if succeeded */
                soc_component_mark_push(component, stream, pm);
        }
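
pm_runtime_get_sync() raises the device usage count even when the resume fails, which is why the error path above now drops it explicitly with pm_runtime_put_noidle(); pm_runtime_resume_and_get() releases the count itself on failure, which would leave the -EACCES-tolerant flow here unbalanced. A tiny counter analogue of that contract, with get_sync/put_noidle as hypothetical stand-ins rather than the kernel functions:

#include <stdio.h>

static int usage_count;

static int get_sync(int fail)
{
        usage_count++;                  /* count is raised even when resume fails */
        return fail ? -1 : 0;
}

static void put_noidle(void)
{
        usage_count--;                  /* error path: drop the reference taken above */
}

int main(void)
{
        int ret = get_sync(1);

        if (ret < 0) {
                put_noidle();
                printf("failed resume: count back to %d\n", usage_count);
                return 1;
        }

        printf("resumed: count %d, balanced by a later put\n", usage_count);
        put_noidle();
        return 0;
}
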
index 1e9afc4..f2ec2a6 100644 (file)
@@ -109,11 +109,6 @@ EXPORT_SYMBOL_NS(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
 #define is_generic_config(x)   0
 #endif
 
-static void hda_codec_device_exit(struct device *dev)
-{
-       snd_hdac_device_exit(dev_to_hdac_dev(dev));
-}
-
 static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
 {
        struct hda_codec *codec;
@@ -126,12 +121,11 @@ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, i
        }
 
        codec->core.type = type;
-       codec->core.dev.release = hda_codec_device_exit;
 
        ret = snd_hdac_device_register(&codec->core);
        if (ret) {
                dev_err(bus->dev, "failed to register hdac device\n");
-               snd_hdac_device_exit(&codec->core);
+               put_device(&codec->core.dev);
                return ERR_PTR(ret);
        }
 
index 899b00d..9f39da9 100644 (file)
@@ -38,7 +38,7 @@ static const struct sof_dev_desc mtl_desc = {
                [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
        },
        .default_fw_filename = {
-               [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+               [SOF_INTEL_IPC4] = "sof-mtl.ri",
        },
        .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
        .ops = &sof_mtl_ops,
index 2d63cc2..4cfe4f2 100644 (file)
@@ -159,6 +159,34 @@ static const struct sof_dev_desc adl_desc = {
        .ops_init = sof_tgl_ops_init,
 };
 
+static const struct sof_dev_desc adl_n_desc = {
+       .machines               = snd_soc_acpi_intel_adl_machines,
+       .alt_machines           = snd_soc_acpi_intel_adl_sdw_machines,
+       .use_acpi_target_states = true,
+       .resindex_lpe_base      = 0,
+       .resindex_pcicfg_base   = -1,
+       .resindex_imr_base      = -1,
+       .irqindex_host_ipc      = -1,
+       .chip_info = &tgl_chip_info,
+       .ipc_supported_mask     = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
+       .ipc_default            = SOF_IPC,
+       .default_fw_path = {
+               [SOF_IPC] = "intel/sof",
+               [SOF_INTEL_IPC4] = "intel/avs/adl-n",
+       },
+       .default_tplg_path = {
+               [SOF_IPC] = "intel/sof-tplg",
+               [SOF_INTEL_IPC4] = "intel/avs-tplg",
+       },
+       .default_fw_filename = {
+               [SOF_IPC] = "sof-adl-n.ri",
+               [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+       },
+       .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+       .ops = &sof_tgl_ops,
+       .ops_init = sof_tgl_ops_init,
+};
+
 static const struct sof_dev_desc rpls_desc = {
        .machines               = snd_soc_acpi_intel_rpl_machines,
        .alt_machines           = snd_soc_acpi_intel_rpl_sdw_machines,
@@ -246,7 +274,7 @@ static const struct pci_device_id sof_pci_ids[] = {
        { PCI_DEVICE(0x8086, 0x51cf), /* RPL-PX */
                .driver_data = (unsigned long)&rpl_desc},
        { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
-               .driver_data = (unsigned long)&adl_desc},
+               .driver_data = (unsigned long)&adl_n_desc},
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, sof_pci_ids);
index 9c70800..70dea8a 100644 (file)
@@ -108,6 +108,7 @@ struct sof_mtrace_core_data {
        int id;
        u32 slot_offset;
        void *log_buffer;
+       struct mutex buffer_lock; /* for log_buffer alloc/free */
        u32 host_read_ptr;
        u32 dsp_write_ptr;
        /* pos update IPC arrived before the slot offset is known, queried */
@@ -128,14 +129,22 @@ static int sof_ipc4_mtrace_dfs_open(struct inode *inode, struct file *file)
        struct sof_mtrace_core_data *core_data = inode->i_private;
        int ret;
 
+       mutex_lock(&core_data->buffer_lock);
+
+       if (core_data->log_buffer) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        ret = debugfs_file_get(file->f_path.dentry);
        if (unlikely(ret))
-               return ret;
+               goto out;
 
        core_data->log_buffer = kmalloc(SOF_MTRACE_SLOT_SIZE, GFP_KERNEL);
        if (!core_data->log_buffer) {
                debugfs_file_put(file->f_path.dentry);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out;
        }
 
        ret = simple_open(inode, file);
@@ -144,6 +153,9 @@ static int sof_ipc4_mtrace_dfs_open(struct inode *inode, struct file *file)
                debugfs_file_put(file->f_path.dentry);
        }
 
+out:
+       mutex_unlock(&core_data->buffer_lock);
+
        return ret;
 }
 
@@ -280,7 +292,10 @@ static int sof_ipc4_mtrace_dfs_release(struct inode *inode, struct file *file)
 
        debugfs_file_put(file->f_path.dentry);
 
+       mutex_lock(&core_data->buffer_lock);
        kfree(core_data->log_buffer);
+       core_data->log_buffer = NULL;
+       mutex_unlock(&core_data->buffer_lock);
 
        return 0;
 }
@@ -563,6 +578,7 @@ static int ipc4_mtrace_init(struct snd_sof_dev *sdev)
                struct sof_mtrace_core_data *core_data = &priv->cores[i];
 
                init_waitqueue_head(&core_data->trace_sleep);
+               mutex_init(&core_data->buffer_lock);
                core_data->sdev = sdev;
                core_data->id = i;
        }
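
The new buffer_lock serializes allocation and release of log_buffer, so a second opener sees -EBUSY instead of racing with a concurrent free. A small userspace analogue with a pthread mutex (names are made up, this is not the SOF code; build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static void *log_buffer;

static int open_log(void)
{
        int ret = 0;

        pthread_mutex_lock(&buffer_lock);
        if (log_buffer) {
                ret = -EBUSY;           /* a second opener is rejected */
        } else {
                log_buffer = malloc(4096);
                if (!log_buffer)
                        ret = -ENOMEM;
        }
        pthread_mutex_unlock(&buffer_lock);
        return ret;
}

static void close_log(void)
{
        pthread_mutex_lock(&buffer_lock);
        free(log_buffer);
        log_buffer = NULL;              /* mark it free for the next opener */
        pthread_mutex_unlock(&buffer_lock);
}

int main(void)
{
        printf("first open:  %d\n", open_log());
        printf("second open: %d\n", open_log());   /* -EBUSY */
        close_log();
        printf("after close: %d\n", open_log());
        close_log();
        return 0;
}
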
index 5ed8e36..a870759 100644 (file)
@@ -126,15 +126,10 @@ EXPORT_SYMBOL(snd_emux_register);
  */
 int snd_emux_free(struct snd_emux *emu)
 {
-       unsigned long flags;
-
        if (! emu)
                return -EINVAL;
 
-       spin_lock_irqsave(&emu->voice_lock, flags);
-       if (emu->timer_active)
-               del_timer(&emu->tlist);
-       spin_unlock_irqrestore(&emu->voice_lock, flags);
+       del_timer_sync(&emu->tlist);
 
        snd_emux_proc_free(emu);
        snd_emux_delete_virmidi(emu);
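
del_timer_sync() both deactivates the timer and waits for a handler that is already running to finish, so the hand-rolled spinlock plus timer_active check can go. A loose userspace analogue of "stop and wait before tearing down" using a thread rather than the kernel timer API (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Toy stand-in for the timer handler: a worker the owner must stop and wait
 * for before freeing shared state — the guarantee del_timer_sync() provides. */
static atomic_bool stop_requested;

static void *worker_fn(void *arg)
{
        (void)arg;
        while (!atomic_load(&stop_requested))
                usleep(1000);           /* pretend to service the callback */
        return NULL;
}

int main(void)
{
        pthread_t worker;

        if (pthread_create(&worker, NULL, worker_fn, NULL))
                return 1;

        atomic_store(&stop_requested, true);
        pthread_join(worker, NULL);     /* like del_timer_sync(): wait for the handler */
        puts("handler finished; safe to free emu state now");
        return 0;
}
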
index e1bf1b5..f3e8484 100644 (file)
@@ -47,6 +47,8 @@ struct snd_usb_implicit_fb_match {
 static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
        /* Fixed EP */
        /* FIXME: check the availability of generic matching */
+       IMPLICIT_FB_FIXED_DEV(0x0763, 0x2030, 0x81, 3), /* M-Audio Fast Track C400 */
+       IMPLICIT_FB_FIXED_DEV(0x0763, 0x2031, 0x81, 3), /* M-Audio Fast Track C600 */
        IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
        IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
        IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
index a564195..9105ec6 100644 (file)
@@ -1631,7 +1631,7 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
        if (!found)
                return;
 
-       strscpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
+       snd_ctl_rename(card, kctl, "Headphone");
 }
 
 static const struct usb_feature_control_info *get_feature_control_info(int control)
index 8aa0d27..abc4186 100644 (file)
@@ -60,6 +60,7 @@
 #define ARM_CPU_IMP_FUJITSU            0x46
 #define ARM_CPU_IMP_HISI               0x48
 #define ARM_CPU_IMP_APPLE              0x61
+#define ARM_CPU_IMP_AMPERE             0xC0
 
 #define ARM_CPU_PART_AEM_V8            0xD0F
 #define ARM_CPU_PART_FOUNDATION                0xD00
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
 
+#define AMPERE_CPU_PART_AMPERE1                0xAC3
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
 #define MIDR_FUJITSU_ERRATUM_010001            MIDR_FUJITSU_A64FX
index ef4775c..b71f4f2 100644 (file)
@@ -96,7 +96,7 @@
 #define X86_FEATURE_SYSCALL32          ( 3*32+14) /* "" syscall in IA32 userspace */
 #define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
-/* FREE!                                ( 3*32+17) */
+#define X86_FEATURE_AMD_LBR_V2         ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
 #define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
index d0d7b9b..5418e2f 100644 (file)
@@ -27,7 +27,7 @@
  * Output:
  * rax original destination
  */
-SYM_FUNC_START(__memcpy)
+SYM_TYPED_FUNC_START(__memcpy)
        ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
                      "jmp memcpy_erms", X86_FEATURE_ERMS
 
index 57619f2..38f8851 100644 (file)
@@ -103,6 +103,7 @@ FEATURE_TESTS_EXTRA :=                  \
          libbpf-bpf_prog_load           \
          libbpf-bpf_object__next_program \
          libbpf-bpf_object__next_map    \
+         libbpf-bpf_program__set_insns  \
          libbpf-bpf_create_map         \
          libpfm4                        \
          libdebuginfod                 \
index 04b07ff..690fe97 100644 (file)
@@ -63,6 +63,7 @@ FILES=                                          \
          test-libbpf-bpf_map_create.bin                \
          test-libbpf-bpf_object__next_program.bin \
          test-libbpf-bpf_object__next_map.bin   \
+         test-libbpf-bpf_program__set_insns.bin        \
          test-libbpf-btf__raw_data.bin          \
          test-get_cpuid.bin                     \
          test-sdt.bin                           \
@@ -316,6 +317,9 @@ $(OUTPUT)test-libbpf-bpf_object__next_program.bin:
 $(OUTPUT)test-libbpf-bpf_object__next_map.bin:
        $(BUILD) -lbpf
 
+$(OUTPUT)test-libbpf-bpf_program__set_insns.bin:
+       $(BUILD) -lbpf
+
 $(OUTPUT)test-libbpf-btf__raw_data.bin:
        $(BUILD) -lbpf
 
diff --git a/tools/build/feature/test-libbpf-bpf_program__set_insns.c b/tools/build/feature/test-libbpf-bpf_program__set_insns.c
new file mode 100644 (file)
index 0000000..f3b7f18
--- /dev/null
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+       bpf_program__set_insns(NULL /* prog */, NULL /* new_insns */, 0 /* new_insn_cnt */);
+       return 0;
+}
index aadee6d..8d35893 100644 (file)
@@ -547,6 +547,10 @@ static int calc_digits(int num)
 {
        int count = 0;
 
+       /* It takes a digit to represent zero */
+       if (!num)
+               return 1;
+
        while (num != 0) {
                num /= 10;
                count++;
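
Without the added check, a num of 0 never enters the divide-by-ten loop and the function reports zero digits. A standalone check of the repaired helper:

#include <assert.h>
#include <stdio.h>

static int calc_digits(int num)
{
        int count = 0;

        /* It takes a digit to represent zero */
        if (!num)
                return 1;

        while (num != 0) {
                num /= 10;
                count++;
        }
        return count;
}

int main(void)
{
        assert(calc_digits(0) == 1);
        assert(calc_digits(7) == 1);
        assert(calc_digits(1234) == 4);
        printf("calc_digits(0) = %d\n", calc_digits(0));
        return 0;
}
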
index 1416822..f243ce6 100644 (file)
@@ -68,6 +68,8 @@ enum {
 #define IPPROTO_PIM            IPPROTO_PIM
   IPPROTO_COMP = 108,          /* Compression Header Protocol          */
 #define IPPROTO_COMP           IPPROTO_COMP
+  IPPROTO_L2TP = 115,          /* Layer 2 Tunnelling Protocol          */
+#define IPPROTO_L2TP           IPPROTO_L2TP
   IPPROTO_SCTP = 132,          /* Stream Control Transport Protocol    */
 #define IPPROTO_SCTP           IPPROTO_SCTP
   IPPROTO_UDPLITE = 136,       /* UDP-Lite (RFC 3828)                  */
@@ -188,21 +190,13 @@ struct ip_mreq_source {
 };
 
 struct ip_msfilter {
+       __be32          imsf_multiaddr;
+       __be32          imsf_interface;
+       __u32           imsf_fmode;
+       __u32           imsf_numsrc;
        union {
-               struct {
-                       __be32          imsf_multiaddr_aux;
-                       __be32          imsf_interface_aux;
-                       __u32           imsf_fmode_aux;
-                       __u32           imsf_numsrc_aux;
-                       __be32          imsf_slist[1];
-               };
-               struct {
-                       __be32          imsf_multiaddr;
-                       __be32          imsf_interface;
-                       __u32           imsf_fmode;
-                       __u32           imsf_numsrc;
-                       __be32          imsf_slist_flex[];
-               };
+               __be32          imsf_slist[1];
+               __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
        };
 };
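
The header resync collapses the duplicated aux struct into common leading members plus a union of the legacy one-element array and a flexible-array view; the wrapper macro exists because a bare flexible array member is not allowed directly inside a union. A rough standalone sketch of the idea, using a simplified stand-in macro rather than the real __DECLARE_FLEX_ARRAY() from the UAPI headers, and assuming GCC/Clang extensions (zero-size placeholder member):

#include <stdio.h>

/* Simplified stand-in: wrapping the flexible array in an anonymous struct
 * makes it legal as a union member. */
#define FLEX_ARRAY_IN_UNION(TYPE, NAME) \
        struct { struct { } __empty_ ## NAME; TYPE NAME[]; }

struct msfilter_like {
        unsigned int multiaddr;
        unsigned int interface_idx;
        unsigned int fmode;
        unsigned int numsrc;
        union {
                unsigned int slist[1];                          /* legacy fixed view */
                FLEX_ARRAY_IN_UNION(unsigned int, slist_flex);  /* flex-array view */
        };
};

int main(void)
{
        struct msfilter_like f = { .slist = { 42 } };

        /* Both names alias the same trailing storage. */
        printf("slist[0]=%u slist_flex[0]=%u sizeof=%zu\n",
               f.slist[0], f.slist_flex[0], sizeof(f));
        return 0;
}
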
 
index eed0315..0d5d441 100644 (file)
@@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
 #define KVM_CAP_S390_ZPCI_OP 221
 #define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index ea6defa..ccb7f5d 100644 (file)
@@ -164,8 +164,6 @@ enum perf_event_sample_format {
        PERF_SAMPLE_WEIGHT_STRUCT               = 1U << 24,
 
        PERF_SAMPLE_MAX = 1U << 25,             /* non-ABI */
-
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 #define PERF_SAMPLE_WEIGHT_TYPE        (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
@@ -263,6 +261,17 @@ enum {
        PERF_BR_MAX,
 };
 
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+       PERF_BR_SPEC_NA                 = 0,    /* Not available */
+       PERF_BR_SPEC_WRONG_PATH         = 1,    /* Speculative but on wrong path */
+       PERF_BR_NON_SPEC_CORRECT_PATH   = 2,    /* Non-speculative but on correct path */
+       PERF_BR_SPEC_CORRECT_PATH       = 3,    /* Speculative and on correct path */
+       PERF_BR_SPEC_MAX,
+};
+
 enum {
        PERF_BR_NEW_FAULT_ALGN          = 0,    /* Alignment fault */
        PERF_BR_NEW_FAULT_DATA          = 1,    /* Data fault */
@@ -282,11 +291,11 @@ enum {
        PERF_BR_PRIV_HV         = 3,
 };
 
-#define PERF_BR_ARM64_FIQ              PERF_BR_NEW_ARCH_1
-#define PERF_BR_ARM64_DEBUG_HALT       PERF_BR_NEW_ARCH_2
-#define PERF_BR_ARM64_DEBUG_EXIT       PERF_BR_NEW_ARCH_3
-#define PERF_BR_ARM64_DEBUG_INST       PERF_BR_NEW_ARCH_4
-#define PERF_BR_ARM64_DEBUG_DATA       PERF_BR_NEW_ARCH_5
+#define PERF_BR_ARM64_FIQ              PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT       PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT       PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST       PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA       PERF_BR_NEW_ARCH_5
 
 #define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
@@ -1397,6 +1406,7 @@ union perf_mem_data_src {
  *     abort: aborting a hardware transaction
  *    cycles: cycles from last branch (or 0 if not supported)
  *      type: branch type
+ *      spec: branch speculation info (or 0 if not supported)
  */
 struct perf_branch_entry {
        __u64   from;
@@ -1407,9 +1417,10 @@ struct perf_branch_entry {
                abort:1,    /* transaction abort */
                cycles:16,  /* cycle count to last branch */
                type:4,     /* branch type */
+               spec:2,     /* branch speculation info */
                new_type:4, /* additional branch type */
                priv:3,     /* privilege level */
-               reserved:33;
+               reserved:31;
 };
 
 union perf_sample_weight {
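
The two bits added for spec shrink reserved from 33 to 31, so the flag word of the branch entry still totals 64 bits. A standalone check of that arithmetic on common ABIs, using a local copy of just the flag bitfield rather than the full UAPI struct:

#include <assert.h>
#include <stdio.h>

/* 1+1+1+1+16+4+2+4+3+31 = 64 bits, so the flags still fit in one __u64. */
struct branch_entry_flags {
        unsigned long long mispred:1,
                           predicted:1,
                           in_tx:1,
                           abort:1,
                           cycles:16,
                           type:4,
                           spec:2,
                           new_type:4,
                           priv:3,
                           reserved:31;
};

int main(void)
{
        assert(sizeof(struct branch_entry_flags) == sizeof(unsigned long long));
        printf("flags word is %zu bytes\n", sizeof(struct branch_entry_flags));
        return 0;
}
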
index 1500a0f..7cab2c6 100644 (file)
@@ -124,7 +124,8 @@ struct statx {
        __u32   stx_dev_minor;
        /* 0x90 */
        __u64   stx_mnt_id;
-       __u64   __spare2;
+       __u32   stx_dio_mem_align;      /* Memory buffer alignment for direct I/O */
+       __u32   stx_dio_offset_align;   /* File offset alignment for direct I/O */
        /* 0xa0 */
        __u64   __spare3[12];   /* Spare space for future expansion */
        /* 0x100 */
@@ -152,6 +153,7 @@ struct statx {
 #define STATX_BASIC_STATS      0x000007ffU     /* The stuff in the normal stat struct */
 #define STATX_BTIME            0x00000800U     /* Want/got stx_btime */
 #define STATX_MNT_ID           0x00001000U     /* Got stx_mnt_id */
+#define STATX_DIOALIGN         0x00002000U     /* Want/got direct I/O alignment info */
 
 #define STATX__RESERVED                0x80000000U     /* Reserved for future struct statx expansion */
 
index 3974a2a..de6810e 100644 (file)
@@ -3,22 +3,6 @@
  *  Advanced Linux Sound Architecture - ALSA - Driver
  *  Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
  *                             Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- *   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or
- *   (at your option) any later version.
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
  */
 
 #ifndef _UAPI__SOUND_ASOUND_H
index 6fd4b13..898226e 100644 (file)
@@ -588,6 +588,10 @@ ifndef NO_LIBELF
           ifeq ($(feature-libbpf-bpf_object__next_map), 1)
             CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
           endif
+          $(call feature_check,libbpf-bpf_program__set_insns)
+          ifeq ($(feature-libbpf-bpf_program__set_insns), 1)
+            CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+          endif
           $(call feature_check,libbpf-btf__raw_data)
           ifeq ($(feature-libbpf-btf__raw_data), 1)
             CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
@@ -604,6 +608,7 @@ ifndef NO_LIBELF
         CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
         CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
         CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+        CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
         CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
         CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
       endif
index 2bca64f..e9e0df4 100644 (file)
 176    64      rt_sigtimedwait                 sys_rt_sigtimedwait
 177    nospu   rt_sigqueueinfo                 sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
 178    nospu   rt_sigsuspend                   sys_rt_sigsuspend               compat_sys_rt_sigsuspend
-179    common  pread64                         sys_pread64                     compat_sys_ppc_pread64
-180    common  pwrite64                        sys_pwrite64                    compat_sys_ppc_pwrite64
+179    32      pread64                         sys_ppc_pread64                 compat_sys_ppc_pread64
+179    64      pread64                         sys_pread64
+180    32      pwrite64                        sys_ppc_pwrite64                compat_sys_ppc_pwrite64
+180    64      pwrite64                        sys_pwrite64
 181    common  chown                           sys_chown
 182    common  getcwd                          sys_getcwd
 183    common  capget                          sys_capget
 188    common  putpmsg                         sys_ni_syscall
 189    nospu   vfork                           sys_vfork
 190    common  ugetrlimit                      sys_getrlimit                   compat_sys_getrlimit
-191    common  readahead                       sys_readahead                   compat_sys_ppc_readahead
+191    32      readahead                       sys_ppc_readahead               compat_sys_ppc_readahead
+191    64      readahead                       sys_readahead
 192    32      mmap2                           sys_mmap2                       compat_sys_mmap2
-193    32      truncate64                      sys_truncate64                  compat_sys_ppc_truncate64
-194    32      ftruncate64                     sys_ftruncate64                 compat_sys_ppc_ftruncate64
+193    32      truncate64                      sys_ppc_truncate64              compat_sys_ppc_truncate64
+194    32      ftruncate64                     sys_ppc_ftruncate64             compat_sys_ppc_ftruncate64
 195    32      stat64                          sys_stat64
 196    32      lstat64                         sys_lstat64
 197    32      fstat64                         sys_fstat64
 230    common  io_submit                       sys_io_submit                   compat_sys_io_submit
 231    common  io_cancel                       sys_io_cancel
 232    nospu   set_tid_address                 sys_set_tid_address
-233    common  fadvise64                       sys_fadvise64                   compat_sys_ppc32_fadvise64
+233    32      fadvise64                       sys_ppc32_fadvise64             compat_sys_ppc32_fadvise64
+233    64      fadvise64                       sys_fadvise64
 234    nospu   exit_group                      sys_exit_group
 235    nospu   lookup_dcookie                  sys_lookup_dcookie              compat_sys_lookup_dcookie
 236    common  epoll_create                    sys_epoll_create
index 52d254b..e128b85 100644 (file)
@@ -649,7 +649,7 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
 static volatile int signr = -1;
 static volatile int child_finished;
 #ifdef HAVE_EVENTFD_SUPPORT
-static int done_fd = -1;
+static volatile int done_fd = -1;
 #endif
 
 static void sig_handler(int sig)
@@ -661,19 +661,24 @@ static void sig_handler(int sig)
 
        done = 1;
 #ifdef HAVE_EVENTFD_SUPPORT
-{
-       u64 tmp = 1;
-       /*
-        * It is possible for this signal handler to run after done is checked
-        * in the main loop, but before the perf counter fds are polled. If this
-        * happens, the poll() will continue to wait even though done is set,
-        * and will only break out if either another signal is received, or the
-        * counters are ready for read. To ensure the poll() doesn't sleep when
-        * done is set, use an eventfd (done_fd) to wake up the poll().
-        */
-       if (write(done_fd, &tmp, sizeof(tmp)) < 0)
-               pr_err("failed to signal wakeup fd, error: %m\n");
-}
+       if (done_fd >= 0) {
+               u64 tmp = 1;
+               int orig_errno = errno;
+
+               /*
+                * It is possible for this signal handler to run after done is
+                * checked in the main loop, but before the perf counter fds are
+                * polled. If this happens, the poll() will continue to wait
+                * even though done is set, and will only break out if either
+                * another signal is received, or the counters are ready for
+                * read. To ensure the poll() doesn't sleep when done is set,
+                * use an eventfd (done_fd) to wake up the poll().
+                */
+               if (write(done_fd, &tmp, sizeof(tmp)) < 0)
+                       pr_err("failed to signal wakeup fd, error: %m\n");
+
+               errno = orig_errno;
+       }
 #endif // HAVE_EVENTFD_SUPPORT
 }
 
@@ -2834,8 +2839,12 @@ out_free_threads:
 
 out_delete_session:
 #ifdef HAVE_EVENTFD_SUPPORT
-       if (done_fd >= 0)
-               close(done_fd);
+       if (done_fd >= 0) {
+               fd = done_fd;
+               done_fd = -1;
+
+               close(fd);
+       }
 #endif
        zstd_fini(&session->zstd_data);
        perf_session__delete(session);
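
The rewritten handler only touches the eventfd when it is actually open and restores errno afterward, since write() inside a signal handler can clobber the errno of whatever code was interrupted; done_fd is volatile because both the handler and the main loop read it. A small standalone illustration of the same pattern (Linux eventfd; the program flow is made up):

#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static volatile int done_fd = -1;       /* volatile: read from both contexts */

static void sig_handler(int sig)
{
        (void)sig;
        done = 1;

        if (done_fd >= 0) {
                uint64_t tmp = 1;
                int orig_errno = errno; /* write() may clobber errno */
                ssize_t n = write(done_fd, &tmp, sizeof(tmp));

                (void)n;                /* nothing async-signal-safe to report with */
                errno = orig_errno;     /* keep the interrupted code's errno intact */
        }
}

int main(void)
{
        uint64_t val;

        done_fd = eventfd(0, 0);
        if (done_fd < 0) {
                perror("eventfd");
                return 1;
        }

        signal(SIGINT, sig_handler);
        raise(SIGINT);                  /* stand-in for the user interrupting the record loop */

        if (read(done_fd, &val, sizeof(val)) == (ssize_t)sizeof(val))
                printf("poll loop woken, done=%d\n", (int)done);

        close(done_fd);
        return 0;
}
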
index 6ee44b1..eacca9a 100755 (executable)
@@ -143,7 +143,7 @@ for i in $SYNC_CHECK_FILES; do
 done
 
 # diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
+check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))" -I"^#include <linux/cfi_types.h>"'
 check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
 check arch/x86/include/asm/amd-ibs.h  '-I "^#include [<\"]\(asm/\)*msr-index.h"'
 check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"'
index 6970203..6443a06 100644 (file)
         "MetricName": "indirect_branch"
     },
     {
-        "MetricExpr": "(armv8_pmuv3_0@event\\=0x1014@ + armv8_pmuv3_0@event\\=0x1018@) / BR_MIS_PRED",
+        "MetricExpr": "(armv8_pmuv3_0@event\\=0x1013@ + armv8_pmuv3_0@event\\=0x1016@) / BR_MIS_PRED",
         "PublicDescription": "Push branch L3 topdown metric",
         "BriefDescription": "Push branch L3 topdown metric",
         "MetricGroup": "TopDownL3",
         "MetricName": "push_branch"
     },
     {
-        "MetricExpr": "armv8_pmuv3_0@event\\=0x100c@ / BR_MIS_PRED",
+        "MetricExpr": "armv8_pmuv3_0@event\\=0x100d@ / BR_MIS_PRED",
         "PublicDescription": "Pop branch L3 topdown metric",
         "BriefDescription": "Pop branch L3 topdown metric",
         "MetricGroup": "TopDownL3",
         "MetricName": "pop_branch"
     },
     {
-        "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1014@ - armv8_pmuv3_0@event\\=0x1018@ - armv8_pmuv3_0@event\\=0x100c@) / BR_MIS_PRED",
+        "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1013@ - armv8_pmuv3_0@event\\=0x1016@ - armv8_pmuv3_0@event\\=0x100d@) / BR_MIS_PRED",
         "PublicDescription": "Other branch L3 topdown metric",
         "BriefDescription": "Other branch L3 topdown metric",
         "MetricGroup": "TopDownL3",
index 8ba3e81..fe050d4 100644 (file)
@@ -1,13 +1,13 @@
 [
     {
       "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P01",
-      "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@) * 100",
+      "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P23",
-      "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@) * 100",
+      "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     },
     {
       "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P01",
-      "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@) * 100",
+      "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P23",
-      "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@) * 100",
+      "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     },
     {
       "MetricName": "XLINK0_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK1_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK2_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK3_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK4_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK5_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK6_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK7_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK0_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK1_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK2_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK3_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK4_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK5_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK6_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "XLINK7_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK0_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK1_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK2_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK3_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK4_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK5_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK6_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK7_OUT_TOTAL_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK0_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK1_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK2_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK3_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK4_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK5_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK6_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
     {
       "MetricName": "ALINK7_OUT_DATA_UTILIZATION",
-      "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+      "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
       "ScaleUnit": "1.063%",
       "AggregationMode": "PerChip"
     },
index 4c0aabb..f5ed7b1 100755 (executable)
@@ -526,6 +526,12 @@ test_kernel_trace()
 test_virtual_lbr()
 {
        echo "--- Test virtual LBR ---"
+       # Check if python script is supported
+       libpython=$(perf version --build-options | grep python | grep -cv OFF)
+       if [ "${libpython}" != "1" ] ; then
+               echo "SKIP: python scripting is not supported"
+               return 2
+       fi
 
        # Python script to determine the maximum size of branch stacks
        cat << "_end_of_file_" > "${maxbrstack}"
index 110f0c6..5f5320f 100644 (file)
@@ -66,6 +66,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
        P_FLAG(BLOCKS);
        P_FLAG(BTIME);
        P_FLAG(MNT_ID);
+       P_FLAG(DIOALIGN);
 
 #undef P_FLAG
 
index 60d8beb..46ada5e 100644 (file)
@@ -2325,11 +2325,19 @@ struct sym_args {
        bool            near;
 };
 
+static bool kern_sym_name_match(const char *kname, const char *name)
+{
+       size_t n = strlen(name);
+
+       return !strcmp(kname, name) ||
+              (!strncmp(kname, name, n) && kname[n] == '\t');
+}
+
 static bool kern_sym_match(struct sym_args *args, const char *name, char type)
 {
        /* A function with the same name, and global or the n'th found or any */
        return kallsyms__is_function(type) &&
-              !strcmp(name, args->name) &&
+              kern_sym_name_match(name, args->name) &&
               ((args->global && isupper(type)) ||
                (args->selected && ++(args->cnt) == args->idx) ||
                (!args->global && !args->selected));
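
kallsyms entries can carry a trailing tab plus the owning module name, so the new helper accepts either an exact match or the name followed by a tab. A standalone check with made-up symbol names:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool kern_sym_name_match(const char *kname, const char *name)
{
        size_t n = strlen(name);

        return !strcmp(kname, name) ||
               (!strncmp(kname, name, n) && kname[n] == '\t');
}

int main(void)
{
        assert(kern_sym_name_match("schedule", "schedule"));
        assert(kern_sym_name_match("foo_fn\t[foo_mod]", "foo_fn"));   /* module-qualified entry */
        assert(!kern_sym_name_match("schedule_timeout", "schedule")); /* a bare prefix is not enough */
        puts("all matches behave as expected");
        return 0;
}
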
index eee64dd..cc7c1f9 100644 (file)
@@ -36,6 +36,11 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
 #endif
 
 #ifndef HAVE_LIBBPF_BPF_PROG_LOAD
+LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
+                               const struct bpf_insn *insns, size_t insns_cnt,
+                               const char *license, __u32 kern_version,
+                               char *log_buf, size_t log_buf_sz);
+
 int bpf_prog_load(enum bpf_prog_type prog_type,
                  const char *prog_name __maybe_unused,
                  const char *license,
index d657594..f4adecc 100644 (file)
 
 #include <internal/xyarray.h>
 
+#ifndef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+int bpf_program__set_insns(struct bpf_program *prog __maybe_unused,
+                          struct bpf_insn *new_insns __maybe_unused, size_t new_insn_cnt __maybe_unused)
+{
+       pr_err("%s: not supported, update libbpf\n", __func__);
+       return -ENOTSUP;
+}
+
+int libbpf_register_prog_handler(const char *sec __maybe_unused,
+                                 enum bpf_prog_type prog_type __maybe_unused,
+                                 enum bpf_attach_type exp_attach_type __maybe_unused,
+                                 const struct libbpf_prog_handler_opts *opts __maybe_unused)
+{
+       pr_err("%s: not supported, update libbpf\n", __func__);
+       return -ENOTSUP;
+}
+#endif
+
 /* temporarily disable libbpf deprecation warnings */
 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 
index aa0c517..75e2248 100644 (file)
        SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
 #endif
 
+// In the kernel sources (include/linux/cfi_types.h), this has a different
+// definition when CONFIG_CFI_CLANG is used; for tools/ just use the !clang
+// definition:
+#ifndef SYM_TYPED_START
+#define SYM_TYPED_START(name, linkage, align...)        \
+        SYM_START(name, linkage, align)
+#endif
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name)                      \
+        SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
 #endif /* PERF_LINUX_LINKAGE_H_ */
index e6020c0..3213dbe 100644 (file)
@@ -6,22 +6,22 @@
    |_|                    |___/          |_|
 
    pm-graph: suspend/resume/boot timing analysis tools
-    Version: 5.9
+    Version: 5.10
      Author: Todd Brandt <todd.e.brandt@intel.com>
-  Home Page: https://01.org/pm-graph
+  Home Page: https://www.intel.com/content/www/us/en/developer/topic-technology/open/pm-graph/overview.html
 
  Report bugs/issues at bugzilla.kernel.org Tools/pm-graph
        - https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools
 
  Full documentation available online & in man pages
        - Getting Started:
-         https://01.org/pm-graph/documentation/getting-started
+         https://www.intel.com/content/www/us/en/developer/articles/technical/usage.html
 
-       - Config File Format:
-         https://01.org/pm-graph/documentation/3-config-file-format
+       - Feature Summary:
+         https://www.intel.com/content/www/us/en/developer/topic-technology/open/pm-graph/features.html
 
        - upstream version in git:
-         https://github.com/intel/pm-graph/
+         git clone https://github.com/intel/pm-graph/
 
  Table of Contents
        - Overview
index 5126271..643271b 100644 (file)
@@ -78,6 +78,9 @@ This helps maintain the consistency of test data for better comparison.
 If a wifi connection is available, check that it reconnects after resume. Include
 the reconnect time in the total resume time calculation and treat wifi timeouts
 as resume failures.
+.TP
+\fB-wifitrace\fR
+Trace through the wifi reconnect time and include it in the timeline.
 
 .SS "advanced"
 .TP
index 33981ad..cfe3433 100755 (executable)
@@ -86,7 +86,7 @@ def ascii(text):
 #       store system values and test parameters
 class SystemValues:
        title = 'SleepGraph'
-       version = '5.9'
+       version = '5.10'
        ansi = False
        rs = 0
        display = ''
@@ -100,6 +100,7 @@ class SystemValues:
        ftracelog = False
        acpidebug = True
        tstat = True
+       wifitrace = False
        mindevlen = 0.0001
        mincglen = 0.0
        cgphase = ''
@@ -124,6 +125,7 @@ class SystemValues:
        epath = '/sys/kernel/debug/tracing/events/power/'
        pmdpath = '/sys/power/pm_debug_messages'
        s0ixpath = '/sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures'
+       s0ixres = '/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us'
        acpipath='/sys/module/acpi/parameters/debug_level'
        traceevents = [
                'suspend_resume',
@@ -180,6 +182,7 @@ class SystemValues:
        tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
        tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
        tracefuncs = {
+               'async_synchronize_full': {},
                'sys_sync': {},
                'ksys_sync': {},
                '__pm_notifier_call_chain': {},
@@ -304,6 +307,7 @@ class SystemValues:
                [2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
                [2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
                [2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
+               [2, 'thermal', 'sh', '-c', 'grep . /sys/class/thermal/thermal_zone*/temp'],
        ]
        cgblacklist = []
        kprobes = dict()
@@ -777,7 +781,7 @@ class SystemValues:
                        return
                if not quiet:
                        sysvals.printSystemInfo(False)
-                       pprint('INITIALIZING FTRACE...')
+                       pprint('INITIALIZING FTRACE')
                # turn trace off
                self.fsetVal('0', 'tracing_on')
                self.cleanupFtrace()
@@ -841,7 +845,7 @@ class SystemValues:
                                for name in self.dev_tracefuncs:
                                        self.defaultKprobe(name, self.dev_tracefuncs[name])
                        if not quiet:
-                               pprint('INITIALIZING KPROBES...')
+                               pprint('INITIALIZING KPROBES')
                        self.addKprobes(self.verbose)
                if(self.usetraceevents):
                        # turn trace events on
@@ -1133,6 +1137,15 @@ class SystemValues:
                                self.cfgdef[file] = fp.read().strip()
                        fp.write(value)
                        fp.close()
+       def s0ixSupport(self):
+               if not os.path.exists(self.s0ixres) or not os.path.exists(self.mempowerfile):
+                       return False
+               fp = open(sysvals.mempowerfile, 'r')
+               data = fp.read().strip()
+               fp.close()
+               if '[s2idle]' in data:
+                       return True
+               return False
        def haveTurbostat(self):
                if not self.tstat:
                        return False
@@ -1146,7 +1159,7 @@ class SystemValues:
                        self.vprint(out)
                        return True
                return False
-       def turbostat(self):
+       def turbostat(self, s0ixready):
                cmd = self.getExec('turbostat')
                rawout = keyline = valline = ''
                fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
@@ -1173,6 +1186,8 @@ class SystemValues:
                for key in keyline:
                        idx = keyline.index(key)
                        val = valline[idx]
+                       if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
+                               continue
                        out.append('%s=%s' % (key, val))
                return '|'.join(out)
        def netfixon(self, net='both'):
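The SYS%LPI handling added above only drops the column when the platform is not expected to reach s0ix and the reported residency is effectively zero. A minimal standalone sketch of that filter, using invented turbostat columns rather than real parser output:

    import re

    def format_turbostat(keyline, valline, s0ixready):
        # Mirror the filter in the patched turbostat(): skip an all-zero
        # SYS%LPI column when s0ix residency is not expected anyway.
        out = []
        for idx, key in enumerate(keyline):
            val = valline[idx]
            if key == 'SYS%LPI' and not s0ixready and re.match(r'^[0\.]*$', val):
                continue
            out.append('%s=%s' % (key, val))
        return '|'.join(out)

    # Hypothetical columns, for illustration only
    print(format_turbostat(['Pkg%pc10', 'SYS%LPI'], ['88.50', '0.00'], False))
    # Pkg%pc10=88.50                (dead SYS%LPI column dropped)
    print(format_turbostat(['Pkg%pc10', 'SYS%LPI'], ['88.50', '0.00'], True))
    # Pkg%pc10=88.50|SYS%LPI=0.00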
@@ -1183,14 +1198,6 @@ class SystemValues:
                out = ascii(fp.read()).strip()
                fp.close()
                return out
-       def wifiRepair(self):
-               out = self.netfixon('wifi')
-               if not out or 'error' in out.lower():
-                       return ''
-               m = re.match('WIFI \S* ONLINE (?P<action>\S*)', out)
-               if not m:
-                       return 'dead'
-               return m.group('action')
        def wifiDetails(self, dev):
                try:
                        info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
@@ -1220,11 +1227,6 @@ class SystemValues:
                                return '%s reconnected %.2f' % \
                                        (self.wifiDetails(dev), max(0, time.time() - start))
                        time.sleep(0.01)
-               if self.netfix:
-                       res = self.wifiRepair()
-                       if res:
-                               timeout = max(0, time.time() - start)
-                               return '%s %s %d' % (self.wifiDetails(dev), res, timeout)
                return '%s timeout %d' % (self.wifiDetails(dev), timeout)
        def errorSummary(self, errinfo, msg):
                found = False
@@ -1346,6 +1348,20 @@ class SystemValues:
                        for i in self.rslist:
                                self.setVal(self.rstgt, i)
                        pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
+       def start(self, pm):
+               if self.useftrace:
+                       self.dlog('start ftrace tracing')
+                       self.fsetVal('1', 'tracing_on')
+                       if self.useprocmon:
+                               self.dlog('start the process monitor')
+                               pm.start()
+       def stop(self, pm):
+               if self.useftrace:
+                       if self.useprocmon:
+                               self.dlog('stop the process monitor')
+                               pm.stop()
+                       self.dlog('stop ftrace tracing')
+                       self.fsetVal('0', 'tracing_on')
 
 sysvals = SystemValues()
 switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
@@ -1643,19 +1659,20 @@ class Data:
                ubiquitous = False
                if kprobename in dtf and 'ub' in dtf[kprobename]:
                        ubiquitous = True
-               title = cdata+' '+rdata
-               mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
-               m = re.match(mstr, title)
-               if m:
-                       c = m.group('caller')
-                       a = m.group('args').strip()
-                       r = m.group('ret')
+               mc = re.match('\(.*\) *(?P<args>.*)', cdata)
+               mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
+               if mc and mr:
+                       c = mr.group('caller').split('+')[0]
+                       a = mc.group('args').strip()
+                       r = mr.group('ret')
                        if len(r) > 6:
                                r = ''
                        else:
                                r = 'ret=%s ' % r
                        if ubiquitous and c in dtf and 'ub' in dtf[c]:
                                return False
+               else:
+                       return False
                color = sysvals.kprobeColor(kprobename)
                e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
                tgtdev['src'].append(e)
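The parser change above matches the call data and return data with two independent expressions and bails out (return False) when either fails, instead of building the event from a single combined match. A self-contained sketch with hypothetical kprobe fragments (real trace strings vary by kernel and probe configuration):

    import re

    # Hypothetical call/return fragments, for illustration only
    cdata = '(dpm_run_callback+0x0/0x70) cb=0x0'
    rdata = '(device_resume+0x2e/0x1b0 <- dpm_run_callback) arg1=0'

    mc = re.match(r'\(.*\) *(?P<args>.*)', cdata)
    mr = re.match(r'\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
    if mc and mr:
        caller = mr.group('caller').split('+')[0]   # strip the +offset suffix
        args = mc.group('args').strip()
        ret = mr.group('ret')
        print(caller, args, ret)                    # device_resume cb=0x0 0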
@@ -1772,6 +1789,14 @@ class Data:
                                                e.time = self.trimTimeVal(e.time, t0, dT, left)
                                                e.end = self.trimTimeVal(e.end, t0, dT, left)
                                                e.length = e.end - e.time
+                               if('cpuexec' in d):
+                                       cpuexec = dict()
+                                       for e in d['cpuexec']:
+                                               c0, cN = e
+                                               c0 = self.trimTimeVal(c0, t0, dT, left)
+                                               cN = self.trimTimeVal(cN, t0, dT, left)
+                                               cpuexec[(c0, cN)] = d['cpuexec'][e]
+                                       d['cpuexec'] = cpuexec
                for dir in ['suspend', 'resume']:
                        list = []
                        for e in self.errorinfo[dir]:
@@ -2086,75 +2111,43 @@ class Data:
                return d
        def addProcessUsageEvent(self, name, times):
                # get the start and end times for this process
-               maxC = 0
-               tlast = 0
-               start = -1
-               end = -1
+               cpuexec = dict()
+               tlast = start = end = -1
                for t in sorted(times):
-                       if tlast == 0:
+                       if tlast < 0:
                                tlast = t
                                continue
-                       if name in self.pstl[t]:
-                               if start == -1 or tlast < start:
+                       if name in self.pstl[t] and self.pstl[t][name] > 0:
+                               if start < 0:
                                        start = tlast
-                               if end == -1 or t > end:
-                                       end = t
+                               end, key = t, (tlast, t)
+                               maxj = (t - tlast) * 1024.0
+                               cpuexec[key] = min(1.0, float(self.pstl[t][name]) / maxj)
                        tlast = t
-               if start == -1 or end == -1:
-                       return 0
+               if start < 0 or end < 0:
+                       return
                # add a new action for this process and get the object
                out = self.newActionGlobal(name, start, end, -3)
-               if not out:
-                       return 0
-               phase, devname = out
-               dev = self.dmesg[phase]['list'][devname]
-               # get the cpu exec data
-               tlast = 0
-               clast = 0
-               cpuexec = dict()
-               for t in sorted(times):
-                       if tlast == 0 or t <= start or t > end:
-                               tlast = t
-                               continue
-                       list = self.pstl[t]
-                       c = 0
-                       if name in list:
-                               c = list[name]
-                       if c > maxC:
-                               maxC = c
-                       if c != clast:
-                               key = (tlast, t)
-                               cpuexec[key] = c
-                               tlast = t
-                               clast = c
-               dev['cpuexec'] = cpuexec
-               return maxC
+               if out:
+                       phase, devname = out
+                       dev = self.dmesg[phase]['list'][devname]
+                       dev['cpuexec'] = cpuexec
        def createProcessUsageEvents(self):
-               # get an array of process names
-               proclist = []
-               for t in sorted(self.pstl):
-                       pslist = self.pstl[t]
-                       for ps in sorted(pslist):
-                               if ps not in proclist:
-                                       proclist.append(ps)
-               # get a list of data points for suspend and resume
-               tsus = []
-               tres = []
+               # get an array of process names and times
+               proclist = {'sus': dict(), 'res': dict()}
+               tdata = {'sus': [], 'res': []}
                for t in sorted(self.pstl):
-                       if t < self.tSuspended:
-                               tsus.append(t)
-                       else:
-                               tres.append(t)
+                       dir = 'sus' if t < self.tSuspended else 'res'
+                       for ps in sorted(self.pstl[t]):
+                               if ps not in proclist[dir]:
+                                       proclist[dir][ps] = 0
+                       tdata[dir].append(t)
                # process the events for suspend and resume
-               if len(proclist) > 0:
+               if len(proclist['sus']) > 0 or len(proclist['res']) > 0:
                        sysvals.vprint('Process Execution:')
-               for ps in proclist:
-                       c = self.addProcessUsageEvent(ps, tsus)
-                       if c > 0:
-                               sysvals.vprint('%25s (sus): %d' % (ps, c))
-                       c = self.addProcessUsageEvent(ps, tres)
-                       if c > 0:
-                               sysvals.vprint('%25s (res): %d' % (ps, c))
+               for dir in ['sus', 'res']:
+                       for ps in sorted(proclist[dir]):
+                               self.addProcessUsageEvent(ps, tdata[dir])
        def handleEndMarker(self, time, msg=''):
                dm = self.dmesg
                self.setEnd(time, msg)
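The rewritten addProcessUsageEvent() above folds the old two-pass scan into one and stores a 0..1 load figure per sample interval; createHTML() further below uses that figure directly as the alpha of the red cpuexec overlay. A small sketch of just the normalization, with invented sample data (the 1024.0-per-second budget mirrors the constant in the patch):

    def cpu_load_fractions(times, samples):
        # times: sorted sample timestamps (seconds); samples[t]: per-process
        # counter delta reported for the interval ending at t.  Returns
        # {(t0, t1): load} with load clamped to 1.0, as in the patched
        # addProcessUsageEvent().
        cpuexec = {}
        tlast = None
        for t in times:
            if tlast is None:
                tlast = t
                continue
            ticks = samples.get(t, 0)
            if ticks > 0:
                budget = (t - tlast) * 1024.0     # full-utilization budget
                cpuexec[(tlast, t)] = min(1.0, ticks / budget)
            tlast = t
        return cpuexec

    # Hypothetical samples: 0.1 s intervals, 51 and 200 units used
    print(cpu_load_fractions([0.0, 0.1, 0.2], {0.1: 51, 0.2: 200}))
    # -> roughly {(0.0, 0.1): 0.498, (0.1, 0.2): 1.0}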
@@ -3218,7 +3211,7 @@ class ProcessMonitor:
 #       markers, and/or kprobes required for primary parsing.
 def doesTraceLogHaveTraceEvents():
        kpcheck = ['_cal: (', '_ret: (']
-       techeck = ['suspend_resume', 'device_pm_callback']
+       techeck = ['suspend_resume', 'device_pm_callback', 'tracing_mark_write']
        tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
        sysvals.usekprobes = False
        fp = sysvals.openlog(sysvals.ftracefile, 'r')
@@ -3241,7 +3234,7 @@ def doesTraceLogHaveTraceEvents():
                                check.remove(i)
                tmcheck = check
        fp.close()
-       sysvals.usetraceevents = True if len(techeck) < 2 else False
+       sysvals.usetraceevents = True if len(techeck) < 3 else False
        sysvals.usetracemarkers = True if len(tmcheck) == 0 else False
 
 # Function: appendIncompleteTraceLog
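With tracing_mark_write added to the probe list above, a trace now counts as event-capable as soon as any one of the three entries is seen: found names are removed from techeck, and the final test became len(techeck) < 3. A toy rendering of that bookkeeping, with an invented trace line:

    techeck = ['suspend_resume', 'device_pm_callback', 'tracing_mark_write']
    tracelines = ['<idle>-0 [001] .... 123.456: tracing_mark_write: SUSPEND START']  # invented

    for line in tracelines:
        for ev in list(techeck):
            if ev in line:
                techeck.remove(ev)

    usetraceevents = len(techeck) < 3   # True once any required event appears
    print(usetraceevents)               # True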
@@ -3456,6 +3449,8 @@ def parseTraceLog(live=False):
                        continue
                # process cpu exec line
                if t.type == 'tracing_mark_write':
+                       if t.name == 'CMD COMPLETE' and data.tKernRes == 0:
+                               data.tKernRes = t.time
                        m = re.match(tp.procexecfmt, t.name)
                        if(m):
                                parts, msg = 1, m.group('ps')
@@ -3674,6 +3669,9 @@ def parseTraceLog(live=False):
                                e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
                                if not e:
                                        continue
+                               if (t.time - e['begin']) * 1000 < sysvals.mindevlen:
+                                       tp.ktemp[key].pop()
+                                       continue
                                e['end'] = t.time
                                e['rdata'] = kprobedata
                                # end of kernel resume
@@ -4213,6 +4211,8 @@ def callgraphHTML(sv, hf, num, cg, title, color, devid):
                        fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
                        flen = fmt % (line.length*1000, line.time)
                if line.isLeaf():
+                       if line.length * 1000 < sv.mincglen:
+                               continue
                        hf.write(html_func_leaf.format(line.name, flen))
                elif line.freturn:
                        hf.write(html_func_end)
@@ -4827,14 +4827,11 @@ def createHTML(testruns, testfail):
                                        if('cpuexec' in dev):
                                                for t in sorted(dev['cpuexec']):
                                                        start, end = t
-                                                       j = float(dev['cpuexec'][t]) / 5
-                                                       if j > 1.0:
-                                                               j = 1.0
                                                        height = '%.3f' % (rowheight/3)
                                                        top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
                                                        left = '%f' % (((start-m0)*100)/mTotal)
                                                        width = '%f' % ((end-start)*100/mTotal)
-                                                       color = 'rgba(255, 0, 0, %f)' % j
+                                                       color = 'rgba(255, 0, 0, %f)' % dev['cpuexec'][t]
                                                        devtl.html += \
                                                                html_cpuexec.format(left, top, height, width, color)
                                        if('src' not in dev):
@@ -5453,17 +5450,9 @@ def executeSuspend(quiet=False):
                call('sync', shell=True)
        sv.dlog('read dmesg')
        sv.initdmesg()
-       # start ftrace
-       if sv.useftrace:
-               if not quiet:
-                       pprint('START TRACING')
-               sv.dlog('start ftrace tracing')
-               sv.fsetVal('1', 'tracing_on')
-               if sv.useprocmon:
-                       sv.dlog('start the process monitor')
-                       pm.start()
-       sv.dlog('run the cmdinfo list before')
+       sv.dlog('cmdinfo before')
        sv.cmdinfo(True)
+       sv.start(pm)
        # execute however many s/r runs requested
        for count in range(1,sv.execcount+1):
                # x2delay in between test runs
@@ -5500,6 +5489,7 @@ def executeSuspend(quiet=False):
                        if res != 0:
                                tdata['error'] = 'cmd returned %d' % res
                else:
+                       s0ixready = sv.s0ixSupport()
                        mode = sv.suspendmode
                        if sv.memmode and os.path.exists(sv.mempowerfile):
                                mode = 'mem'
@@ -5509,9 +5499,10 @@ def executeSuspend(quiet=False):
                                sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
                        if sv.acpidebug:
                                sv.testVal(sv.acpipath, 'acpi', '0xe')
-                       if mode == 'freeze' and sv.haveTurbostat():
+                       if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
+                               and sv.haveTurbostat():
                                # execution will pause here
-                               turbo = sv.turbostat()
+                               turbo = sv.turbostat(s0ixready)
                                if turbo:
                                        tdata['turbo'] = turbo
                        else:
@@ -5522,7 +5513,8 @@ def executeSuspend(quiet=False):
                                        pf.close()
                                except Exception as e:
                                        tdata['error'] = str(e)
-               sv.dlog('system returned from resume')
+               sv.fsetVal('CMD COMPLETE', 'trace_marker')
+               sv.dlog('system returned')
                # reset everything
                sv.testVal('restoreall')
                if(sv.rtcwake):
@@ -5535,33 +5527,29 @@ def executeSuspend(quiet=False):
                        sv.fsetVal('WAIT END', 'trace_marker')
                # return from suspend
                pprint('RESUME COMPLETE')
-               sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+               if(count < sv.execcount):
+                       sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+               elif(not sv.wifitrace):
+                       sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+                       sv.stop(pm)
                if sv.wifi and wifi:
                        tdata['wifi'] = sv.pollWifi(wifi)
                        sv.dlog('wifi check, %s' % tdata['wifi'])
-                       if sv.netfix:
-                               netfixout = sv.netfixon('wired')
-               elif sv.netfix:
-                       netfixout = sv.netfixon()
-               if sv.netfix and netfixout:
-                       tdata['netfix'] = netfixout
+               if(count == sv.execcount and sv.wifitrace):
+                       sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+                       sv.stop(pm)
+               if sv.netfix:
+                       tdata['netfix'] = sv.netfixon()
                        sv.dlog('netfix, %s' % tdata['netfix'])
                if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
                        sv.dlog('read the ACPI FPDT')
                        tdata['fw'] = getFPDT(False)
                testdata.append(tdata)
-       sv.dlog('run the cmdinfo list after')
+       sv.dlog('cmdinfo after')
        cmdafter = sv.cmdinfo(False)
-       # stop ftrace
-       if sv.useftrace:
-               if sv.useprocmon:
-                       sv.dlog('stop the process monitor')
-                       pm.stop()
-               sv.fsetVal('0', 'tracing_on')
        # grab a copy of the dmesg output
        if not quiet:
                pprint('CAPTURING DMESG')
-       sysvals.dlog('EXECUTION TRACE END')
        sv.getdmesg(testdata)
        # grab a copy of the ftrace output
        if sv.useftrace:
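The reshuffled tail of the loop above is what -wifitrace buys: on the final run the RESUME COMPLETE marker and sv.stop(pm) are deferred until after pollWifi(), so the reconnect lands in the trace; without the flag they happen before the wifi check, replacing the old stop-ftrace block that ran after the loop. A compressed sketch of that control flow, with list entries standing in for the trace-marker write, sv.stop(pm) and pollWifi():

    def finish_run(count, execcount, wifitrace, wifi_up):
        # Mirrors the marker/stop ordering in the patched executeSuspend()
        log = []
        if count < execcount:
            log.append('RESUME COMPLETE marker')
        elif not wifitrace:
            log.append('RESUME COMPLETE marker')
            log.append('stop ftrace')
        if wifi_up:
            log.append('poll wifi reconnect')
        if count == execcount and wifitrace:
            log.append('RESUME COMPLETE marker')
            log.append('stop ftrace')
        return log

    print(finish_run(1, 1, wifitrace=False, wifi_up=True))
    # ['RESUME COMPLETE marker', 'stop ftrace', 'poll wifi reconnect']
    print(finish_run(1, 1, wifitrace=True, wifi_up=True))
    # ['poll wifi reconnect', 'RESUME COMPLETE marker', 'stop ftrace']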
@@ -6350,6 +6338,8 @@ def data_from_html(file, outpath, issues, fulldetail=False):
                if not m:
                        continue
                name, time, phase = m.group('n'), m.group('t'), m.group('p')
+               if name == 'async_synchronize_full':
+                       continue
                if ' async' in name or ' sync' in name:
                        name = ' '.join(name.split(' ')[:-1])
                if phase.startswith('suspend'):
@@ -6701,6 +6691,7 @@ def printHelp():
        '   -skiphtml    Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
        '   -result fn   Export a results table to a text file for parsing.\n'\
        '   -wifi        If a wifi connection is available, check that it reconnects after resume.\n'\
+       '   -wifitrace   Trace kernel execution through wifi reconnect.\n'\
        '   -netfix      Use netfix to reset the network in the event it fails to resume.\n'\
        '  [testprep]\n'\
        '   -sync        Sync the filesystems before starting the test\n'\
@@ -6828,6 +6819,8 @@ if __name__ == '__main__':
                        sysvals.sync = True
                elif(arg == '-wifi'):
                        sysvals.wifi = True
+               elif(arg == '-wifitrace'):
+                       sysvals.wifitrace = True
                elif(arg == '-netfix'):
                        sysvals.netfix = True
                elif(arg == '-gzip'):
index 0464b2c..f07aef7 100644 (file)
@@ -49,6 +49,7 @@ TARGETS += net
 TARGETS += net/af_unix
 TARGETS += net/forwarding
 TARGETS += net/mptcp
+TARGETS += net/openvswitch
 TARGETS += netfilter
 TARGETS += nsfs
 TARGETS += pidfd
index 127b8ca..24dd621 100644 (file)
@@ -3936,6 +3936,19 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "Invalid type_id",
 },
 {
+       .descr = "decl_tag test #16, func proto, return type",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),                          /* [1] */
+               BTF_VAR_ENC(NAME_TBD, 1, 0),                                            /* [2] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */
+               BTF_FUNC_PROTO_ENC(3, 0),                                               /* [4] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0local\0tag1"),
+       .btf_load_err = true,
+       .err_str = "Invalid return type",
+},
+{
        .descr = "type_tag test #1",
        .raw_types = {
                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
index 099c23d..b39093d 100644 (file)
@@ -47,14 +47,14 @@ record_sample(struct bpf_dynptr *dynptr, void *context)
                if (status) {
                        bpf_printk("bpf_dynptr_read() failed: %d\n", status);
                        err = 1;
-                       return 0;
+                       return 1;
                }
        } else {
                sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
                if (!sample) {
                        bpf_printk("Unexpectedly failed to get sample\n");
                        err = 2;
-                       return 0;
+                       return 1;
                }
                stack_sample = *sample;
        }
index e9dab5f..6b8d2e2 100644 (file)
@@ -7,6 +7,8 @@ TEST_PROGS := \
        bond-lladdr-target.sh \
        dev_addr_lists.sh
 
-TEST_FILES := lag_lib.sh
+TEST_FILES := \
+       lag_lib.sh \
+       net_forwarding_lib.sh
 
 include ../../../lib.mk
index e6fa24e..5cfe7d8 100755 (executable)
@@ -14,7 +14,7 @@ ALL_TESTS="
 REQUIRE_MZ=no
 NUM_NETIFS=0
 lib_dir=$(dirname "$0")
-source "$lib_dir"/../../../net/forwarding/lib.sh
+source "$lib_dir"/net_forwarding_lib.sh
 
 source "$lib_dir"/lag_lib.sh
 
diff --git a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
new file mode 120000 (symlink)
index 0000000..39c9682
--- /dev/null
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh
\ No newline at end of file
index dca8be6..a1f269e 100755 (executable)
@@ -18,8 +18,8 @@ NUM_NETIFS=1
 REQUIRE_JQ="no"
 REQUIRE_MZ="no"
 NETIF_CREATE="no"
-lib_dir=$(dirname $0)/../../../net/forwarding
-source $lib_dir/lib.sh
+lib_dir=$(dirname "$0")
+source "$lib_dir"/lib.sh
 
 cleanup() {
        echo "Cleaning up"
index 642d8df..6a86e61 100644 (file)
@@ -3,4 +3,8 @@
 
 TEST_PROGS := dev_addr_lists.sh
 
+TEST_FILES := \
+       lag_lib.sh \
+       net_forwarding_lib.sh
+
 include ../../../lib.mk
index debda72..3391311 100755 (executable)
@@ -11,14 +11,14 @@ ALL_TESTS="
 REQUIRE_MZ=no
 NUM_NETIFS=0
 lib_dir=$(dirname "$0")
-source "$lib_dir"/../../../net/forwarding/lib.sh
+source "$lib_dir"/net_forwarding_lib.sh
 
-source "$lib_dir"/../bonding/lag_lib.sh
+source "$lib_dir"/lag_lib.sh
 
 
 destroy()
 {
-       local ifnames=(dummy0 dummy1 team0 mv0)
+       local ifnames=(dummy1 dummy2 team0 mv0)
        local ifname
 
        for ifname in "${ifnames[@]}"; do
diff --git a/tools/testing/selftests/drivers/net/team/lag_lib.sh b/tools/testing/selftests/drivers/net/team/lag_lib.sh
new file mode 120000 (symlink)
index 0000000..e1347a1
--- /dev/null
@@ -0,0 +1 @@
+../bonding/lag_lib.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
new file mode 120000 (symlink)
index 0000000..39c9682
--- /dev/null
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh
\ No newline at end of file
index db52257..d3a79da 100644 (file)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
 
 echo 0 > events/enable
 
index 914fe2e..6461c37 100644 (file)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger eprobe on synthetic event
-# requires: dynamic_events synthetic_events events/syscalls/sys_enter_openat/hist "e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events synthetic_events events/syscalls/sys_enter_openat/hist "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
 
 echo 0 > events/enable
 
index 7321490..5a0e0df 100644 (file)
@@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../ -I../../../../../usr/include/
 CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
 LDLIBS := -lpthread -lrt
 
-HEADERS := \
+LOCAL_HDRS := \
        ../include/futextest.h \
        ../include/atomic.h \
        ../include/logging.h
-TEST_GEN_FILES := \
+TEST_GEN_PROGS := \
        futex_wait_timeout \
        futex_wait_wouldblock \
        futex_requeue_pi \
@@ -24,5 +24,3 @@ TEST_PROGS := run.sh
 top_srcdir = ../../../../..
 DEFAULT_INSTALL_HDR_PATH := 1
 include ../../lib.mk
-
-$(TEST_GEN_FILES): $(HEADERS)
index 39f0fa2..05d66ef 100644 (file)
@@ -2,10 +2,10 @@
 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
 LDLIBS += -lm
 
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
-ifeq (x86,$(ARCH))
+ifeq (x86,$(ARCH_PROCESSED))
 TEST_GEN_FILES := msr aperf
 endif
 
index 806a150..67fe7a4 100644 (file)
@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # Makefile for kexec tests
 
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
-ifeq ($(ARCH),$(filter $(ARCH),x86 ppc64le))
+ifeq ($(ARCH_PROCESSED),$(filter $(ARCH_PROCESSED),x86 ppc64le))
 TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh
 TEST_FILES := kexec_common_lib.sh
 
index e05ecb3..9c131d9 100644 (file)
@@ -662,8 +662,8 @@ int test_kvm_device(uint32_t gic_dev_type)
                                             : KVM_DEV_TYPE_ARM_VGIC_V2;
 
        if (!__kvm_test_create_device(v.vm, other)) {
-               ret = __kvm_test_create_device(v.vm, other);
-               TEST_ASSERT(ret && (errno == EINVAL || errno == EEXIST),
+               ret = __kvm_create_device(v.vm, other);
+               TEST_ASSERT(ret < 0 && (errno == EINVAL || errno == EEXIST),
                                "create GIC device while other version exists");
        }
 
index 6ee7e1d..bb1d17a 100644 (file)
@@ -67,7 +67,7 @@ struct memslot_antagonist_args {
 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
                               uint64_t nr_modifications)
 {
-       const uint64_t pages = 1;
+       uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
        uint64_t gpa;
        int i;
 
index 8a5cb80..2a57271 100644 (file)
 #include <time.h>
 #include <sched.h>
 #include <signal.h>
+#include <pthread.h>
 
 #include <sys/eventfd.h>
 
+/* Defined in include/linux/kvm_types.h */
+#define GPA_INVALID            (~(ulong)0)
+
 #define SHINFO_REGION_GVA      0xc0000000ULL
 #define SHINFO_REGION_GPA      0xc0000000ULL
 #define SHINFO_REGION_SLOT     10
@@ -44,6 +48,8 @@
 
 #define MIN_STEAL_TIME         50000
 
+#define SHINFO_RACE_TIMEOUT    2       /* seconds */
+
 #define __HYPERVISOR_set_timer_op      15
 #define __HYPERVISOR_sched_op          29
 #define __HYPERVISOR_event_channel_op  32
@@ -126,7 +132,7 @@ struct {
        struct kvm_irq_routing_entry entries[2];
 } irq_routes;
 
-bool guest_saw_irq;
+static volatile bool guest_saw_irq;
 
 static void evtchn_handler(struct ex_regs *regs)
 {
@@ -148,6 +154,7 @@ static void guest_wait_for_irq(void)
 static void guest_code(void)
 {
        struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
+       int i;
 
        __asm__ __volatile__(
                "sti\n"
@@ -325,6 +332,49 @@ static void guest_code(void)
        guest_wait_for_irq();
 
        GUEST_SYNC(21);
+       /* Racing host ioctls */
+
+       guest_wait_for_irq();
+
+       GUEST_SYNC(22);
+       /* Racing vmcall against host ioctl */
+
+       ports[0] = 0;
+
+       p = (struct sched_poll) {
+               .ports = ports,
+               .nr_ports = 1,
+               .timeout = 0
+       };
+
+wait_for_timer:
+       /*
+        * Poll for a timer wake event while the worker thread is mucking with
+        * the shared info.  KVM XEN drops timer IRQs if the shared info is
+        * invalid when the timer expires.  Arbitrarily poll 100 times before
+        * giving up and asking the VMM to re-arm the timer.  100 polls should
+        * consume enough time to beat on KVM without taking too long if the
+        * timer IRQ is dropped due to an invalid event channel.
+        */
+       for (i = 0; i < 100 && !guest_saw_irq; i++)
+               asm volatile("vmcall"
+                            : "=a" (rax)
+                            : "a" (__HYPERVISOR_sched_op),
+                              "D" (SCHEDOP_poll),
+                              "S" (&p)
+                            : "memory");
+
+       /*
+        * Re-send the timer IRQ if it was (likely) dropped due to the timer
+        * expiring while the event channel was invalid.
+        */
+       if (!guest_saw_irq) {
+               GUEST_SYNC(23);
+               goto wait_for_timer;
+       }
+       guest_saw_irq = false;
+
+       GUEST_SYNC(24);
 }
 
 static int cmp_timespec(struct timespec *a, struct timespec *b)
@@ -352,11 +402,36 @@ static void handle_alrm(int sig)
        TEST_FAIL("IRQ delivery timed out");
 }
 
+static void *juggle_shinfo_state(void *arg)
+{
+       struct kvm_vm *vm = (struct kvm_vm *)arg;
+
+       struct kvm_xen_hvm_attr cache_init = {
+               .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+               .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
+       };
+
+       struct kvm_xen_hvm_attr cache_destroy = {
+               .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+               .u.shared_info.gfn = GPA_INVALID
+       };
+
+       for (;;) {
+               __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_init);
+               __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_destroy);
+               pthread_testcancel();
+       };
+
+       return NULL;
+}
+
 int main(int argc, char *argv[])
 {
        struct timespec min_ts, max_ts, vm_ts;
        struct kvm_vm *vm;
+       pthread_t thread;
        bool verbose;
+       int ret;
 
        verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
                               !strncmp(argv[1], "--verbose", 10));
@@ -785,6 +860,71 @@ int main(int argc, char *argv[])
                        case 21:
                                TEST_ASSERT(!evtchn_irq_expected,
                                            "Expected event channel IRQ but it didn't happen");
+                               alarm(0);
+
+                               if (verbose)
+                                       printf("Testing shinfo lock corruption (KVM_XEN_HVM_EVTCHN_SEND)\n");
+
+                               ret = pthread_create(&thread, NULL, &juggle_shinfo_state, (void *)vm);
+                               TEST_ASSERT(ret == 0, "pthread_create() failed: %s", strerror(ret));
+
+                               struct kvm_irq_routing_xen_evtchn uxe = {
+                                       .port = 1,
+                                       .vcpu = vcpu->id,
+                                       .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL
+                               };
+
+                               evtchn_irq_expected = true;
+                               for (time_t t = time(NULL) + SHINFO_RACE_TIMEOUT; time(NULL) < t;)
+                                       __vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
+                               break;
+
+                       case 22:
+                               TEST_ASSERT(!evtchn_irq_expected,
+                                           "Expected event channel IRQ but it didn't happen");
+
+                               if (verbose)
+                                       printf("Testing shinfo lock corruption (SCHEDOP_poll)\n");
+
+                               shinfo->evtchn_pending[0] = 1;
+
+                               evtchn_irq_expected = true;
+                               tmr.u.timer.expires_ns = rs->state_entry_time +
+                                                        SHINFO_RACE_TIMEOUT * 1000000000ULL;
+                               vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+                               break;
+
+                       case 23:
+                               /*
+                                * Optional and possibly repeated sync point.
+                                * Injecting the timer IRQ may fail if the
+                                * shinfo is invalid when the timer expires.
+                                * If the timer has expired but the IRQ hasn't
+                                * been delivered, rearm the timer and retry.
+                                */
+                               vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
+
+                               /* Resume the guest if the timer is still pending. */
+                               if (tmr.u.timer.expires_ns)
+                                       break;
+
+                               /* All done if the IRQ was delivered. */
+                               if (!evtchn_irq_expected)
+                                       break;
+
+                               tmr.u.timer.expires_ns = rs->state_entry_time +
+                                                        SHINFO_RACE_TIMEOUT * 1000000000ULL;
+                               vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+                               break;
+                       case 24:
+                               TEST_ASSERT(!evtchn_irq_expected,
+                                           "Expected event channel IRQ but it didn't happen");
+
+                               ret = pthread_cancel(thread);
+                               TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));
+
+                               ret = pthread_join(thread, 0);
+                               TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
                                goto done;
 
                        case 0x20:
index 9d4cb94..a3ea3d4 100644 (file)
@@ -70,7 +70,7 @@ endef
 run_tests: all
 ifdef building_out_of_srctree
        @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
-               rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+               rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
        fi
        @if [ "X$(TEST_PROGS)" != "X" ]; then \
                $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
@@ -84,7 +84,7 @@ endif
 
 define INSTALL_SINGLE_RULE
        $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
-       $(if $(INSTALL_LIST),rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
+       $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
 endef
 
 define INSTALL_RULE
index 74ee506..611be86 100755 (executable)
@@ -138,7 +138,6 @@ online_all_offline_memory()
 {
        for memory in `hotpluggable_offline_memory`; do
                if ! online_memory_expect_success $memory; then
-                       echo "$FUNCNAME $memory: unexpected fail" >&2
                        retval=1
                fi
        done
index 2a6b0bc..69c5836 100644 (file)
@@ -70,6 +70,7 @@ TEST_PROGS += io_uring_zerocopy_tx.sh
 TEST_GEN_FILES += bind_bhash
 TEST_GEN_PROGS += sk_bind_sendto_listen
 TEST_GEN_PROGS += sk_connect_zero_addr
+TEST_PROGS += test_ingress_egress_chaining.sh
 
 TEST_FILES := settings
 
diff --git a/tools/testing/selftests/net/openvswitch/Makefile b/tools/testing/selftests/net/openvswitch/Makefile
new file mode 100644 (file)
index 0000000..2f1508a
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+top_srcdir = ../../../../..
+
+CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+
+TEST_PROGS := openvswitch.sh
+
+TEST_FILES := ovs-dpctl.py
+
+EXTRA_CLEAN := test_netlink_checks
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
new file mode 100755 (executable)
index 0000000..7ce4670
--- /dev/null
@@ -0,0 +1,218 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# OVS kernel module self tests
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+PAUSE_ON_FAIL=no
+VERBOSE=0
+TRACING=0
+
+tests="
+       netlink_checks                          ovsnl: validate netlink attrs and settings"
+
+info() {
+    [ $VERBOSE = 0 ] || echo $*
+}
+
+ovs_base=`pwd`
+sbxs=
+sbx_add () {
+       info "adding sandbox '$1'"
+
+       sbxs="$sbxs $1"
+
+       NO_BIN=0
+
+       # Create sandbox.
+       local d="$ovs_base"/$1
+       if [ -e $d ]; then
+               info "removing $d"
+               rm -rf "$d"
+       fi
+       mkdir "$d" || return 1
+       ovs_setenv $1
+}
+
+ovs_exit_sig() {
+       [ -e ${ovs_dir}/cleanup ] && . "$ovs_dir/cleanup"
+}
+
+on_exit() {
+       echo "$1" > ${ovs_dir}/cleanup.tmp
+       cat ${ovs_dir}/cleanup >> ${ovs_dir}/cleanup.tmp
+       mv ${ovs_dir}/cleanup.tmp ${ovs_dir}/cleanup
+}
+
+ovs_setenv() {
+       sandbox=$1
+
+       ovs_dir=$ovs_base${1:+/$1}; export ovs_dir
+
+       test -e ${ovs_dir}/cleanup || : > ${ovs_dir}/cleanup
+}
+
+ovs_sbx() {
+       if test "X$2" != X; then
+               (ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log)
+       else
+               ovs_setenv $1
+       fi
+}
+
+ovs_add_dp () {
+       info "Adding DP/Bridge IF: sbx:$1 dp:$2 {$3, $4, $5}"
+       sbxname="$1"
+       shift
+       ovs_sbx "$sbxname" python3 $ovs_base/ovs-dpctl.py add-dp $*
+       on_exit "ovs_sbx $sbxname python3 $ovs_base/ovs-dpctl.py del-dp $1;"
+}
+
+usage() {
+       echo
+       echo "$0 [OPTIONS] [TEST]..."
+       echo "If no TEST argument is given, all tests will be run."
+       echo
+       echo "Options"
+       echo "  -t: capture traffic via tcpdump"
+       echo "  -v: verbose"
+       echo "  -p: pause on failure"
+       echo
+       echo "Available tests${tests}"
+       exit 1
+}
+
+# netlink_validation
+# - Create a dp
+# - check no warning with "old version" simulation
+test_netlink_checks () {
+       sbx_add "test_netlink_checks" || return 1
+
+       info "setting up new DP"
+       ovs_add_dp "test_netlink_checks" nv0 || return 1
+       # now try again
+       PRE_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
+       ovs_add_dp "test_netlink_checks" nv0 -V 0 || return 1
+       POST_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
+       if [ "$PRE_TEST" != "$POST_TEST" ]; then
+               info "failed - gen warning"
+               return 1
+       fi
+
+       return 0
+}
+
+run_test() {
+       (
+       tname="$1"
+       tdesc="$2"
+
+       if ! lsmod | grep openvswitch >/dev/null 2>&1; then
+               stdbuf -o0 printf "TEST: %-60s  [NOMOD]\n" "${tdesc}"
+               return $ksft_skip
+       fi
+
+       if python3 ovs-dpctl.py -h 2>&1 | \
+            grep "Need to install the python" >/dev/null 2>&1; then
+               stdbuf -o0 printf "TEST: %-60s  [PYLIB]\n" "${tdesc}"
+               return $ksft_skip
+       fi
+       printf "TEST: %-60s  [START]\n" "${tname}"
+
+       unset IFS
+
+       eval test_${tname}
+       ret=$?
+
+       if [ $ret -eq 0 ]; then
+               printf "TEST: %-60s  [ OK ]\n" "${tdesc}"
+               ovs_exit_sig
+               rm -rf "$ovs_dir"
+       elif [ $ret -eq 1 ]; then
+               printf "TEST: %-60s  [FAIL]\n" "${tdesc}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "Pausing. Logs in $ovs_dir/. Hit enter to continue"
+                       read a
+               fi
+               ovs_exit_sig
+               [ "${PAUSE_ON_FAIL}" = "yes" ] || rm -rf "$ovs_dir"
+               exit 1
+       elif [ $ret -eq $ksft_skip ]; then
+               printf "TEST: %-60s  [SKIP]\n" "${tdesc}"
+       elif [ $ret -eq 2 ]; then
+               rm -rf test_${tname}
+               run_test "$1" "$2"
+       fi
+
+       return $ret
+       )
+       ret=$?
+       case $ret in
+               0)
+                       [ $all_skipped = true ] && [ $exitcode -eq $ksft_skip ] && exitcode=0
+                       all_skipped=false
+               ;;
+               $ksft_skip)
+                       [ $all_skipped = true ] && exitcode=$ksft_skip
+               ;;
+               *)
+                       all_skipped=false
+                       exitcode=1
+               ;;
+       esac
+
+       return $ret
+}
+
+
+exitcode=0
+desc=0
+all_skipped=true
+
+while getopts :pvt o
+do
+       case $o in
+       p) PAUSE_ON_FAIL=yes;;
+       v) VERBOSE=1;;
+       t) if which tcpdump > /dev/null 2>&1; then
+               TRACING=1
+          else
+               echo "=== tcpdump not available, tracing disabled"
+          fi
+          ;;
+       *) usage;;
+       esac
+done
+shift $(($OPTIND-1))
+
+IFS="  
+"
+
+for arg do
+       # Check first that all requested tests are available before running any
+       command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
+done
+
+name=""
+desc=""
+for t in ${tests}; do
+       [ "${name}" = "" ]      && name="${t}"  && continue
+       [ "${desc}" = "" ]      && desc="${t}"
+
+       run_this=1
+       for arg do
+               [ "${arg}" != "${arg#--*}" ] && continue
+               [ "${arg}" = "${name}" ] && run_this=1 && break
+               run_this=0
+       done
+       if [ $run_this -eq 1 ]; then
+               run_test "${name}" "${desc}"
+       fi
+       name=""
+       desc=""
+done
+
+exit ${exitcode}
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
new file mode 100644 (file)
index 0000000..3243c90
--- /dev/null
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+# Controls the openvswitch module.  Part of the kselftest suite, but
+# can be used for diagnostic purposes as well.
+
+import argparse
+import errno
+import sys
+
+try:
+    from pyroute2 import NDB
+
+    from pyroute2.netlink import NLM_F_ACK
+    from pyroute2.netlink import NLM_F_REQUEST
+    from pyroute2.netlink import genlmsg
+    from pyroute2.netlink import nla
+    from pyroute2.netlink.exceptions import NetlinkError
+    from pyroute2.netlink.generic import GenericNetlinkSocket
+except ModuleNotFoundError:
+    print("Need to install the python pyroute2 package.")
+    sys.exit(0)
+
+
+OVS_DATAPATH_FAMILY = "ovs_datapath"
+OVS_VPORT_FAMILY = "ovs_vport"
+OVS_FLOW_FAMILY = "ovs_flow"
+OVS_PACKET_FAMILY = "ovs_packet"
+OVS_METER_FAMILY = "ovs_meter"
+OVS_CT_LIMIT_FAMILY = "ovs_ct_limit"
+
+OVS_DATAPATH_VERSION = 2
+OVS_DP_CMD_NEW = 1
+OVS_DP_CMD_DEL = 2
+OVS_DP_CMD_GET = 3
+OVS_DP_CMD_SET = 4
+
+OVS_VPORT_CMD_NEW = 1
+OVS_VPORT_CMD_DEL = 2
+OVS_VPORT_CMD_GET = 3
+OVS_VPORT_CMD_SET = 4
+
+
+class ovs_dp_msg(genlmsg):
+    # include the OVS version
+    # We need a custom header rather than just being able to rely on
+    # genlmsg because fields ends up not expressing everything correctly
+    # if we use the canonical example of setting fields = (('customfield',),)
+    fields = genlmsg.fields + (("dpifindex", "I"),)
+
+
+class OvsDatapath(GenericNetlinkSocket):
+
+    OVS_DP_F_VPORT_PIDS = 1 << 1
+    OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3
+
+    class dp_cmd_msg(ovs_dp_msg):
+        """
+        Message class that will be used to communicate with the kernel module.
+        """
+
+        nla_map = (
+            ("OVS_DP_ATTR_UNSPEC", "none"),
+            ("OVS_DP_ATTR_NAME", "asciiz"),
+            ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
+            ("OVS_DP_ATTR_STATS", "dpstats"),
+            ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
+            ("OVS_DP_ATTR_USER_FEATURES", "uint32"),
+            ("OVS_DP_ATTR_PAD", "none"),
+            ("OVS_DP_ATTR_MASKS_CACHE_SIZE", "uint32"),
+            ("OVS_DP_ATTR_PER_CPU_PIDS", "array(uint32)"),
+        )
+
+        class dpstats(nla):
+            fields = (
+                ("hit", "=Q"),
+                ("missed", "=Q"),
+                ("lost", "=Q"),
+                ("flows", "=Q"),
+            )
+
+        class megaflowstats(nla):
+            fields = (
+                ("mask_hit", "=Q"),
+                ("masks", "=I"),
+                ("padding", "=I"),
+                ("cache_hits", "=Q"),
+                ("pad1", "=Q"),
+            )
+
+    def __init__(self):
+        GenericNetlinkSocket.__init__(self)
+        self.bind(OVS_DATAPATH_FAMILY, OvsDatapath.dp_cmd_msg)
+
+    def info(self, dpname, ifindex=0):
+        msg = OvsDatapath.dp_cmd_msg()
+        msg["cmd"] = OVS_DP_CMD_GET
+        msg["version"] = OVS_DATAPATH_VERSION
+        msg["reserved"] = 0
+        msg["dpifindex"] = ifindex
+        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+        try:
+            reply = self.nlm_request(
+                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
+            )
+            reply = reply[0]
+        except NetlinkError as ne:
+            if ne.code == errno.ENODEV:
+                reply = None
+            else:
+                raise ne
+
+        return reply
+
+    def create(self, dpname, shouldUpcall=False, versionStr=None):
+        msg = OvsDatapath.dp_cmd_msg()
+        msg["cmd"] = OVS_DP_CMD_NEW
+        if versionStr is None:
+            msg["version"] = OVS_DATAPATH_VERSION
+        else:
+            msg["version"] = int(versionStr.split(":")[0], 0)
+        msg["reserved"] = 0
+        msg["dpifindex"] = 0
+        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+        dpfeatures = 0
+        if versionStr is not None and versionStr.find(":") != -1:
+            dpfeatures = int(versionStr.split(":")[1], 0)
+        else:
+            dpfeatures = OvsDatapath.OVS_DP_F_VPORT_PIDS
+
+        msg["attrs"].append(["OVS_DP_ATTR_USER_FEATURES", dpfeatures])
+        if not shouldUpcall:
+            msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", 0])
+
+        try:
+            reply = self.nlm_request(
+                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
+            )
+            reply = reply[0]
+        except NetlinkError as ne:
+            if ne.code == errno.EEXIST:
+                reply = None
+            else:
+                raise ne
+
+        return reply
+
+    def destroy(self, dpname):
+        msg = OvsDatapath.dp_cmd_msg()
+        msg["cmd"] = OVS_DP_CMD_DEL
+        msg["version"] = OVS_DATAPATH_VERSION
+        msg["reserved"] = 0
+        msg["dpifindex"] = 0
+        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+        try:
+            reply = self.nlm_request(
+                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
+            )
+            reply = reply[0]
+        except NetlinkError as ne:
+            if ne.code == errno.ENODEV:
+                reply = None
+            else:
+                raise ne
+
+        return reply
+
+
+class OvsVport(GenericNetlinkSocket):
+    class ovs_vport_msg(ovs_dp_msg):
+        nla_map = (
+            ("OVS_VPORT_ATTR_UNSPEC", "none"),
+            ("OVS_VPORT_ATTR_PORT_NO", "uint32"),
+            ("OVS_VPORT_ATTR_TYPE", "uint32"),
+            ("OVS_VPORT_ATTR_NAME", "asciiz"),
+            ("OVS_VPORT_ATTR_OPTIONS", "none"),
+            ("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
+            ("OVS_VPORT_ATTR_STATS", "vportstats"),
+            ("OVS_VPORT_ATTR_PAD", "none"),
+            ("OVS_VPORT_ATTR_IFINDEX", "uint32"),
+            ("OVS_VPORT_ATTR_NETNSID", "uint32"),
+        )
+
+        class vportstats(nla):
+            fields = (
+                ("rx_packets", "=Q"),
+                ("tx_packets", "=Q"),
+                ("rx_bytes", "=Q"),
+                ("tx_bytes", "=Q"),
+                ("rx_errors", "=Q"),
+                ("tx_errors", "=Q"),
+                ("rx_dropped", "=Q"),
+                ("tx_dropped", "=Q"),
+            )
+
+    def type_to_str(vport_type):
+        if vport_type == 1:
+            return "netdev"
+        elif vport_type == 2:
+            return "internal"
+        elif vport_type == 3:
+            return "gre"
+        elif vport_type == 4:
+            return "vxlan"
+        elif vport_type == 5:
+            return "geneve"
+        return "unknown:%d" % vport_type
+
+    def __init__(self):
+        GenericNetlinkSocket.__init__(self)
+        self.bind(OVS_VPORT_FAMILY, OvsVport.ovs_vport_msg)
+
+    def info(self, vport_name, dpifindex=0, portno=None):
+        msg = OvsVport.ovs_vport_msg()
+
+        msg["cmd"] = OVS_VPORT_CMD_GET
+        msg["version"] = OVS_DATAPATH_VERSION
+        msg["reserved"] = 0
+        msg["dpifindex"] = dpifindex
+
+        if portno is None:
+            msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_name])
+        else:
+            msg["attrs"].append(["OVS_VPORT_ATTR_PORT_NO", portno])
+
+        try:
+            reply = self.nlm_request(
+                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
+            )
+            reply = reply[0]
+        except NetlinkError as ne:
+            if ne.code == errno.ENODEV:
+                reply = None
+            else:
+                raise ne
+        return reply
+
+
+def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()):
+    dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME")
+    base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS")
+    megaflow_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_MEGAFLOW_STATS")
+    user_features = dp_lookup_rep.get_attr("OVS_DP_ATTR_USER_FEATURES")
+    masks_cache_size = dp_lookup_rep.get_attr("OVS_DP_ATTR_MASKS_CACHE_SIZE")
+
+    print("%s:" % dp_name)
+    print(
+        "  lookups: hit:%d missed:%d lost:%d"
+        % (base_stats["hit"], base_stats["missed"], base_stats["lost"])
+    )
+    print("  flows:%d" % base_stats["flows"])
+    pkts = base_stats["hit"] + base_stats["missed"]
+    avg = (megaflow_stats["mask_hit"] / pkts) if pkts != 0 else 0.0
+    print(
+        "  masks: hit:%d total:%d hit/pkt:%f"
+        % (megaflow_stats["mask_hit"], megaflow_stats["masks"], avg)
+    )
+    print("  caches:")
+    print("    masks-cache: size:%d" % masks_cache_size)
+
+    if user_features is not None:
+        print("  features: 0x%X" % user_features)
+
+    # port print out
+    vpl = OvsVport()
+    for iface in ndb.interfaces:
+        rep = vpl.info(iface.ifname, ifindex)
+        if rep is not None:
+            print(
+                "  port %d: %s (%s)"
+                % (
+                    rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
+                    rep.get_attr("OVS_VPORT_ATTR_NAME"),
+                    OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
+                )
+            )
+
+
+def main(argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="count",
+        help="Increment 'verbose' output counter.",
+    )
+    subparsers = parser.add_subparsers()
+
+    showdpcmd = subparsers.add_parser("show")
+    showdpcmd.add_argument(
+        "showdp", metavar="N", type=str, nargs="?", help="Datapath Name"
+    )
+
+    adddpcmd = subparsers.add_parser("add-dp")
+    adddpcmd.add_argument("adddp", help="Datapath Name")
+    adddpcmd.add_argument(
+        "-u",
+        "--upcall",
+        action="store_true",
+        help="Leave open a reader for upcalls",
+    )
+    adddpcmd.add_argument(
+        "-V",
+        "--versioning",
+        required=False,
+        help="Specify a custom version / feature string",
+    )
+
+    deldpcmd = subparsers.add_parser("del-dp")
+    deldpcmd.add_argument("deldp", help="Datapath Name")
+
+    args = parser.parse_args()
+
+    ovsdp = OvsDatapath()
+    ndb = NDB()
+
+    if hasattr(args, "showdp"):
+        found = False
+        for iface in ndb.interfaces:
+            rep = None
+            if args.showdp is None:
+                rep = ovsdp.info(iface.ifname, 0)
+            elif args.showdp == iface.ifname:
+                rep = ovsdp.info(iface.ifname, 0)
+
+            if rep is not None:
+                found = True
+                print_ovsdp_full(rep, iface.index, ndb)
+
+        if not found:
+            msg = "No DP found"
+            if args.showdp is not None:
+                msg += ":'%s'" % args.showdp
+            print(msg)
+    elif hasattr(args, "adddp"):
+        rep = ovsdp.create(args.adddp, args.upcall, args.versioning)
+        if rep is None:
+            print("DP '%s' already exists" % args.adddp)
+        else:
+            print("DP '%s' added" % args.adddp)
+    elif hasattr(args, "deldp"):
+        ovsdp.destroy(args.deldp)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
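Since ovs-dpctl.py guards main() under __main__, the OvsDatapath and OvsVport helpers above can also be driven directly from Python. A usage sketch, assuming pyroute2 is installed, the openvswitch module is loaded, the caller has the required netlink privileges, and the 'nv0' datapath name is just an example:

    import importlib.util

    # Load the hyphen-named script as a module (illustrative local path)
    spec = importlib.util.spec_from_file_location('ovs_dpctl', './ovs-dpctl.py')
    dpctl = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(dpctl)

    ovsdp = dpctl.OvsDatapath()
    if ovsdp.create('nv0') is None:          # like: ovs-dpctl.py add-dp nv0
        print("DP 'nv0' already exists")

    rep = ovsdp.info('nv0')
    if rep is not None:
        print('datapath:', rep.get_attr('OVS_DP_ATTR_NAME'))

    ovsdp.destroy('nv0')                     # like: ovs-dpctl.py del-dp nv0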
diff --git a/tools/testing/selftests/net/test_ingress_egress_chaining.sh b/tools/testing/selftests/net/test_ingress_egress_chaining.sh
new file mode 100644 (file)
index 0000000..08adff6
--- /dev/null
@@ -0,0 +1,79 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test runs a simple ingress tc setup between two veth pairs,
+# and chains a single egress rule to test ingress chaining to egress.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if [ "$(id -u)" -ne 0 ];then
+       echo "SKIP: Need root privileges"
+       exit $ksft_skip
+fi
+
+needed_mods="act_mirred cls_flower sch_ingress"
+for mod in $needed_mods; do
+       modinfo $mod &>/dev/null || { echo "SKIP: Need $mod module"; exit $ksft_skip; }
+done
+
+ns="ns$((RANDOM%899+100))"
+veth1="veth1$((RANDOM%899+100))"
+veth2="veth2$((RANDOM%899+100))"
+peer1="peer1$((RANDOM%899+100))"
+peer2="peer2$((RANDOM%899+100))"
+ip_peer1=198.51.100.5
+ip_peer2=198.51.100.6
+
+function fail() {
+       echo "FAIL: $@" >> /dev/stderr
+       exit 1
+}
+
+function cleanup() {
+       killall -q -9 udpgso_bench_rx
+       ip link del $veth1 &> /dev/null
+       ip link del $veth2 &> /dev/null
+       ip netns del $ns &> /dev/null
+}
+trap cleanup EXIT
+
+function config() {
+       echo "Setup veth pairs [$veth1, $peer1], and veth pair [$veth2, $peer2]"
+       ip link add $veth1 type veth peer name $peer1
+       ip link add $veth2 type veth peer name $peer2
+       ip addr add $ip_peer1/24 dev $peer1
+       ip link set $peer1 up
+       ip netns add $ns
+       ip link set dev $peer2 netns $ns
+       ip netns exec $ns ip addr add $ip_peer2/24 dev $peer2
+       ip netns exec $ns ip link set $peer2 up
+       ip link set $veth1 up
+       ip link set $veth2 up
+
+       echo "Add tc filter ingress->egress forwarding $veth1 <-> $veth2"
+       tc qdisc add dev $veth2 ingress
+       tc qdisc add dev $veth1 ingress
+       tc filter add dev $veth2 ingress prio 1 proto all flower \
+               action mirred egress redirect dev $veth1
+       tc filter add dev $veth1 ingress prio 1 proto all flower \
+               action mirred egress redirect dev $veth2
+
+       echo "Add tc filter egress->ingress forwarding $peer1 -> $veth1, bypassing the veth pipe"
+       tc qdisc add dev $peer1 clsact
+       tc filter add dev $peer1 egress prio 20 proto ip flower \
+               action mirred ingress redirect dev $veth1
+}
+
+function test_run() {
+       echo "Run tcp traffic"
+       ./udpgso_bench_rx -t &
+       sleep 1
+       ip netns exec $ns timeout -k 2 10 ./udpgso_bench_tx -t -l 2 -4 -D $ip_peer1 || fail "traffic failed"
+       echo "Test passed"
+}
+
+config
+test_run
+trap - EXIT
+cleanup
index 6d849dc..d1d8483 100644 (file)
@@ -62,6 +62,8 @@ static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr,
                .remove_on_exec = 1, /* Required by sigtrap. */
                .sigtrap        = 1, /* Request synchronous SIGTRAP on event. */
                .sig_data       = TEST_SIG_DATA(addr, id),
+               .exclude_kernel = 1, /* To allow */
+               .exclude_hv     = 1, /* running as !root */
        };
        return attr;
 }
@@ -93,9 +95,13 @@ static void *test_thread(void *arg)
 
        __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
        iter = ctx.iterate_on; /* read */
-       for (i = 0; i < iter - 1; i++) {
-               __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
-               ctx.iterate_on = iter; /* idempotent write */
+       if (iter >= 0) {
+               for (i = 0; i < iter - 1; i++) {
+                       __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+                       ctx.iterate_on = iter; /* idempotent write */
+               }
+       } else {
+               while (ctx.iterate_on);
        }
 
        return NULL;
@@ -208,4 +214,27 @@ TEST_F(sigtrap_threads, signal_stress)
        EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
 }
 
+TEST_F(sigtrap_threads, signal_stress_with_disable)
+{
+       const int target_count = NUM_THREADS * 3000;
+       int i;
+
+       ctx.iterate_on = -1;
+
+       EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+       pthread_barrier_wait(&self->barrier);
+       while (__atomic_load_n(&ctx.signal_count, __ATOMIC_RELAXED) < target_count) {
+               EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+               EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+       }
+       ctx.iterate_on = 0;
+       for (i = 0; i < NUM_THREADS; i++)
+               ASSERT_EQ(pthread_join(self->threads[i], NULL), 0);
+       EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+
+       EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
+       EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+       EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
+}
+
 TEST_HARNESS_MAIN
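
The new signal_stress_with_disable subtest parks the worker threads in a busy loop (iterate_on == -1) while the main thread repeatedly disables and re-enables the event, then checks that SIGTRAP delivery kept making progress across the toggling. A minimal sketch of that toggle pattern, assuming fd comes from perf_event_open() and leaving out the kselftest harness, might look like:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Toggle a perf event off and back on 'times' times via its fd. */
static bool toggle_event(int fd, int times)
{
	int i;

	for (i = 0; i < times; i++) {
		if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0) ||
		    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0))
			return false;	/* an ioctl failed; stop toggling */
	}
	return true;
}

The subtest itself wraps each ioctl in EXPECT_EQ() and loops until ctx.signal_count reaches the target before releasing the spinning threads.
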
index fa73353..be8a364 100644 (file)
@@ -111,7 +111,7 @@ class Dot2c(Automata):
 
     def format_aut_init_header(self):
         buff = []
-        buff.append("struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def))
+        buff.append("static struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def))
         return buff
 
     def __get_string_vector_per_line_content(self, buff):
index e30f1b4..f1df24c 100644 (file)
@@ -4839,6 +4839,12 @@ struct compat_kvm_clear_dirty_log {
        };
 };
 
+long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                                    unsigned long arg)
+{
+       return -ENOTTY;
+}
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -4847,6 +4853,11 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 
        if (kvm->mm != current->mm || kvm->vm_dead)
                return -EIO;
+
+       r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
+       if (r != -ENOTTY)
+               return r;
+
        switch (ioctl) {
 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
        case KVM_CLEAR_DIRTY_LOG: {
@@ -5398,6 +5409,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
                           int (*get)(void *, u64 *), int (*set)(void *, u64),
                           const char *fmt)
 {
+       int ret;
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
                                          inode->i_private;
 
@@ -5409,15 +5421,13 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
        if (!kvm_get_kvm_safe(stat_data->kvm))
                return -ENOENT;
 
-       if (simple_attr_open(inode, file, get,
-                   kvm_stats_debugfs_mode(stat_data->desc) & 0222
-                   ? set : NULL,
-                   fmt)) {
+       ret = simple_attr_open(inode, file, get,
+                              kvm_stats_debugfs_mode(stat_data->desc) & 0222
+                              ? set : NULL, fmt);
+       if (ret)
                kvm_put_kvm(stat_data->kvm);
-               return -ENOMEM;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int kvm_debugfs_release(struct inode *inode, struct file *file)
index 68ff41d..346e47f 100644 (file)
@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
 
+       if (!gpc->active)
+               return false;
+
        if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
                return false;
 
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
-       kvm_pfn_t old_pfn, new_pfn;
+       bool unmap_old = false;
        unsigned long old_uhva;
+       kvm_pfn_t old_pfn;
        void *old_khva;
-       int ret = 0;
+       int ret;
 
        /*
         * It must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
        write_lock_irq(&gpc->lock);
 
+       if (!gpc->active) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                /* If the HVA→PFN mapping was already valid, don't unmap it. */
                old_pfn = KVM_PFN_ERR_FAULT;
                old_khva = NULL;
+               ret = 0;
        }
 
  out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                gpc->khva = NULL;
        }
 
-       /* Snapshot the new pfn before dropping the lock! */
-       new_pfn = gpc->pfn;
+       /* Detect a pfn change before dropping the lock! */
+       unmap_old = (old_pfn != gpc->pfn);
 
+out_unlock:
        write_unlock_irq(&gpc->lock);
 
        mutex_unlock(&gpc->refresh_lock);
 
-       if (old_pfn != new_pfn)
+       if (unmap_old)
                gpc_unmap_khva(kvm, old_pfn, old_khva);
 
        return ret;
@@ -346,42 +357,61 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
+{
+       rwlock_init(&gpc->lock);
+       mutex_init(&gpc->refresh_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_init);
 
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-                             struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-                             gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+                    struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+                    gpa_t gpa, unsigned long len)
 {
        WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
 
        if (!gpc->active) {
-               rwlock_init(&gpc->lock);
-               mutex_init(&gpc->refresh_lock);
-
                gpc->khva = NULL;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
                gpc->usage = usage;
                gpc->valid = false;
-               gpc->active = true;
 
                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
+
+               /*
+                * Activate the cache after adding it to the list; a concurrent
+                * refresh must not establish a mapping until the cache is
+                * reachable by mmu_notifier events.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = true;
+               write_unlock_irq(&gpc->lock);
        }
        return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
        if (gpc->active) {
+               /*
+                * Deactivate the cache before removing it from the list; KVM
+                * must stall mmu_notifier events until all users go away, i.e.
+                * until gpc->lock is dropped and refresh is guaranteed to fail.
+                */
+               write_lock_irq(&gpc->lock);
+               gpc->active = false;
+               write_unlock_irq(&gpc->lock);
+
                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);
 
                kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
-               gpc->active = false;
        }
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
+EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
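
With this split, a caller does the one-time lock setup with kvm_gpc_init() and then brackets actual use with kvm_gpc_activate()/kvm_gpc_deactivate(), while check and refresh now reject an inactive cache instead of touching state that is being torn down. An illustrative caller-side sequence follows; the function name, gpa, and length are placeholders and error handling is trimmed, so this is a sketch rather than an existing user:

#include <linux/kvm_host.h>

/* Illustrative lifecycle only; not taken from an in-tree user. */
static int example_gpc_user(struct kvm *kvm, struct kvm_vcpu *vcpu,
			    struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
	int ret;

	kvm_gpc_init(gpc);		/* one-time: gpc->lock + refresh_lock */

	ret = kvm_gpc_activate(kvm, gpc, vcpu, KVM_HOST_USES_PFN,
			       gpa, sizeof(u64));
	if (ret)
		return ret;

	/*
	 * ... access the mapping through gpc->khva under read_lock(&gpc->lock),
	 * revalidating with kvm_gfn_to_pfn_cache_check() first ...
	 */

	kvm_gpc_deactivate(kvm, gpc);	/* unmap and drop from kvm->gpc_list */
	return 0;
}
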