Merge tag 'drm-misc-next-fixes-2020-08-05' of git://anongit.freedesktop.org/drm/drm...
author    Dave Airlie <airlied@redhat.com>
          Tue, 11 Aug 2020 00:56:11 +0000 (10:56 +1000)
committer Dave Airlie <airlied@redhat.com>
          Tue, 11 Aug 2020 00:56:36 +0000 (10:56 +1000)
drm-misc-next-fixes for v5.9-rc1:
- Fix drm_dp_mst_port refcount leaks in drm_dp_mst_allocate_vcpi.
- Fix an fbcon OOB read in fbdev, found by syzbot.
- Mark vga_tryget static as it's not used elsewhere.
- Small fixes to xlnx.
- Remove a redundant NULL check before kfree() in drm_dev_release (kfree(NULL) is a no-op; see the sketch after this list).
- Fix DRM_FORMAT_MOD_AMLOGIC_FBC definition.
- Fix mode initialization in omap_connector_mode_valid().
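
A note on the kfree() item above: kfree(NULL) is defined to be a no-op, so
guarding the call with a NULL check adds nothing. The minimal sketch below
illustrates the pattern only; it is not the actual drm_dev_release() change,
and the struct and function names are hypothetical.

    #include <linux/slab.h>

    struct demo_dev {
            char *managed_buf;      /* may still be NULL at release time */
    };

    /*
     * Before (redundant guard):
     *         if (dev->managed_buf)
     *                 kfree(dev->managed_buf);
     *
     * After: kfree() tolerates a NULL pointer, so call it unconditionally.
     */
    static void demo_release(struct demo_dev *dev)
    {
            kfree(dev->managed_buf);
            dev->managed_buf = NULL;
    }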

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/b2043dad-f118-bd19-54a6-f23bf6264007@linux.intel.com
1822 files changed:
.gitignore
.mailmap
Documentation/ABI/testing/dev-kmsg
Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg
Documentation/admin-guide/README.rst
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/device-mapper/index.rst
Documentation/admin-guide/mm/transhuge.rst
Documentation/arm64/cpu-feature-registers.rst
Documentation/arm64/silicon-errata.rst
Documentation/block/bfq-iosched.rst
Documentation/bpf/prog_cgroup_sockopt.rst
Documentation/core-api/dma-api.rst
Documentation/core-api/pin_user_pages.rst
Documentation/dev-tools/kcsan.rst
Documentation/dev-tools/kunit/faq.rst
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
Documentation/devicetree/bindings/clock/imx27-clock.yaml
Documentation/devicetree/bindings/clock/imx31-clock.yaml
Documentation/devicetree/bindings/clock/imx5-clock.yaml
Documentation/devicetree/bindings/display/bridge/sii902x.txt
Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
Documentation/devicetree/bindings/display/imx/ldb.txt
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/display/msm/gpu.txt
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.txt
Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
Documentation/devicetree/bindings/sound/audio-graph-card.txt
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
Documentation/devicetree/bindings/thermal/thermal-sensor.yaml
Documentation/devicetree/bindings/thermal/thermal-zones.yaml
Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
Documentation/devicetree/bindings/timer/csky,mptimer.txt
Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
Documentation/devicetree/writing-schema.rst
Documentation/filesystems/overlayfs.rst
Documentation/i2c/slave-eeprom-backend.rst
Documentation/kbuild/modules.rst
Documentation/kbuild/reproducible-builds.rst
Documentation/mips/ingenic-tcu.rst
Documentation/networking/arcnet.rst
Documentation/networking/ax25.rst
Documentation/networking/can_ucan_protocol.rst
Documentation/networking/dsa/dsa.rst
Documentation/networking/ieee802154.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/ipvs-sysctl.rst
Documentation/networking/rxrpc.rst
Documentation/powerpc/vas-api.rst
Documentation/process/changes.rst
Documentation/process/coding-style.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/include/asm/elf.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/kernel/entry.S
arch/arc/kernel/head.S
arch/arc/kernel/setup.c
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-boneblack-common.dtsi
arch/arm/boot/dts/am335x-boneblack-wireless.dts
arch/arm/boot/dts/am335x-boneblue.dts
arch/arm/boot/dts/am335x-bonegreen-wireless.dts
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-lxm.dts
arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
arch/arm/boot/dts/am335x-pepper.dts
arch/arm/boot/dts/am335x-phycore-som.dtsi
arch/arm/boot/dts/am335x-pocketbeagle.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-cm-t43.dts
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am5729-beagleboneai.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
arch/arm/boot/dts/bcm958522er.dts
arch/arm/boot/dts/bcm958525er.dts
arch/arm/boot/dts/bcm958525xmc.dts
arch/arm/boot/dts/bcm958622hr.dts
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/bcm958625k.dts
arch/arm/boot/dts/dra7-evm-common.dtsi
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/imx6qdl-gw551x.dtsi
arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap4-duovero-parlor.dts
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/socfpga_arria10.dtsi
arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
arch/arm/include/asm/efi.h
arch/arm/kernel/asm-offsets.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-imx/devices/devices-common.h
arch/arm/mach-imx/devices/platform-gpio-mxc.c
arch/arm/mach-imx/devices/platform-imx-dma.c
arch/arm/mach-imx/mm-imx21.c
arch/arm/mach-imx/mm-imx27.c
arch/arm/mach-imx/pm-imx5.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-sti/board-dt.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dts
arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/syscall.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/vdso/clocksource.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/sigreturn.S
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kernel/vdso32/sigreturn.S [deleted file]
arch/arm64/kernel/vdso32/vdso.lds.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/pmu.c
arch/arm64/kvm/pvtime.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/m68k/kernel/setup_no.c
arch/m68k/mm/mcfmmu.c
arch/mips/boot/dts/ingenic/gcw0.dts
arch/mips/include/asm/unroll.h
arch/mips/kernel/traps.c
arch/mips/kvm/emulate.c
arch/mips/kvm/mips.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/pci/pci-xtalk-bridge.c
arch/openrisc/kernel/dma.c
arch/powerpc/include/asm/icswx.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/paca.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/mm/book3s64/pkeys.c
arch/powerpc/mm/nohash/kaslr_booke.c
arch/powerpc/platforms/powernv/vas-fault.c
arch/riscv/Kconfig
arch/riscv/include/asm/barrier.h
arch/riscv/include/asm/gdb_xml.h
arch/riscv/include/asm/kgdb.h
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/kgdb.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/vgettimeofday.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/debug.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/setup.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/maccess.c
arch/s390/pci/pci_event.c
arch/x86/Kconfig
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_64.S
arch/x86/entry/Makefile
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/events/Makefile
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpumask.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/io_bitmap.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/umwait.c
arch/x86/kernel/cpu/zhaoxin.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process.c
arch/x86/kernel/traps.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmcs.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/memcpy_64.S
arch/x86/lib/usercopy_64.c
arch/x86/math-emu/wm_sqrt.S
arch/x86/platform/uv/uv_irq.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm_64.S
arch/xtensa/kernel/perf_event.c
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/xtensa_ksyms.c
block/bio-integrity.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/keyslot-manager.c
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_hash.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/public_key.c
drivers/acpi/acpi_configfs.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/fan.c
drivers/acpi/sysfs.c
drivers/android/binder.c
drivers/base/base.h
drivers/base/core.c
drivers/base/dd.c
drivers/base/power/trace.c
drivers/base/regmap/Kconfig
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/block/zram/zram_drv.c
drivers/bus/ti-sysc.c
drivers/char/tpm/st33zp24/i2c.c
drivers/char/tpm/st33zp24/spi.c
drivers/char/tpm/st33zp24/st33zp24.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_tis.c
drivers/char/tpm/tpm_tis_core.c
drivers/char/tpm/tpm_tis_spi_main.c
drivers/char/virtio_console.c
drivers/clk/Kconfig
drivers/clk/clk-ast2600.c
drivers/clk/mvebu/Kconfig
drivers/clk/sifive/fu540-prci.c
drivers/clocksource/arm_arch_timer.c
drivers/counter/104-quad-8.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle.c
drivers/dma-buf/dma-buf.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/irq.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-sdma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma.h
drivers/dma/mcf-edma.c
drivers/dma/sh/usb-dmac.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/k3-udma-private.c
drivers/dma/ti/k3-udma.c
drivers/edac/amd64_edac.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efi-stub.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/libstub/skip_spaces.c
drivers/firmware/psci/psci_checker.c
drivers/firmware/raspberrypi.c
drivers/fpga/Kconfig
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/powerplay/Makefile
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/powerplay/smu_cmn.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smu_cmn.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smu_internal.h
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smu_v12_0.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/drm_crtc_helper_internal.h
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_combo_phy.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dp_mst.h
drivers/gpu/drm/i915/display/intel_dpio_phy.c
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_hdmi.h
drivers/gpu/drm/i915/display/intel_lspcon.c
drivers/gpu/drm/i915/display/intel_lspcon.h
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/intel_sdvo_regs.h
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_context.h
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_mman.h
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/mock_context.c
drivers/gpu/drm/i915/gt/debugfs_gt.c
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context_sseu.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_engine_user.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_gtt.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ppgtt.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_reset.h
drivers/gpu/drm/i915/gt/intel_reset_types.h
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_sseu.c
drivers/gpu/drm/i915/gt/intel_sseu.h
drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_rc6.c
drivers/gpu/drm/i915/gt/selftest_rps.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_getparam.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_utils.c
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma_types.h
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/selftests/i915_perf.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/adreno/a2xx.xml.h
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx.xml.h
drivers/gpu/drm/msm/adreno/a4xx.xml.h
drivers/gpu/drm/msm/adreno/a5xx.xml.h
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx.xml.h
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
drivers/gpu/drm/msm/adreno/adreno_common.xml.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/disp/mdp_common.xml.h
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/sfpb.xml.h
drivers/gpu/drm/msm/edp/edp.xml.h
drivers/gpu/drm/msm/hdmi/hdmi.xml.h
drivers/gpu/drm/msm/hdmi/qfprom.xml.h
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/nouveau/Kbuild
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv50/base.h
drivers/gpu/drm/nouveau/dispnv50/base507c.c
drivers/gpu/drm/nouveau/dispnv50/base827c.c
drivers/gpu/drm/nouveau/dispnv50/base907c.c
drivers/gpu/drm/nouveau/dispnv50/core.h
drivers/gpu/drm/nouveau/dispnv50/core507d.c
drivers/gpu/drm/nouveau/dispnv50/corec37d.c
drivers/gpu/drm/nouveau/dispnv50/corec57d.c
drivers/gpu/drm/nouveau/dispnv50/crc.c
drivers/gpu/drm/nouveau/dispnv50/crc.h
drivers/gpu/drm/nouveau/dispnv50/crc907d.c
drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
drivers/gpu/drm/nouveau/dispnv50/dac507d.c
drivers/gpu/drm/nouveau/dispnv50/dac907d.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/disp.h
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/dispnv50/head.h
drivers/gpu/drm/nouveau/dispnv50/head507d.c
drivers/gpu/drm/nouveau/dispnv50/head827d.c
drivers/gpu/drm/nouveau/dispnv50/head907d.c
drivers/gpu/drm/nouveau/dispnv50/head917d.c
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
drivers/gpu/drm/nouveau/dispnv50/headc57d.c
drivers/gpu/drm/nouveau/dispnv50/lut.c
drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
drivers/gpu/drm/nouveau/dispnv50/ovly.h
drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
drivers/gpu/drm/nouveau/dispnv50/pior507d.c
drivers/gpu/drm/nouveau/dispnv50/sor507d.c
drivers/gpu/drm/nouveau/dispnv50/sor907d.c
drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/dispnv50/wndw.h
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
drivers/gpu/drm/nouveau/include/nvfw/pmu.h
drivers/gpu/drm/nouveau/include/nvfw/sec2.h
drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvhw/drf.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/client.h
drivers/gpu/drm/nouveau/include/nvif/device.h
drivers/gpu/drm/nouveau/include/nvif/disp.h
drivers/gpu/drm/nouveau/include/nvif/mem.h
drivers/gpu/drm/nouveau/include/nvif/mmu.h
drivers/gpu/drm/nouveau/include/nvif/notify.h
drivers/gpu/drm/nouveau/include/nvif/object.h
drivers/gpu/drm/nouveau/include/nvif/parent.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/printf.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/push.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/push006c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/push206e.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/push507c.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/push906f.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/pushc37b.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/user.h
drivers/gpu/drm/nouveau/include/nvif/vmm.h
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_bo0039.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bo5039.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bo74c1.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bo85b5.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bo9039.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bo90b5.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_boa0b5.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_chan.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_dma.h
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv04_fence.c
drivers/gpu/drm/nouveau/nv10_fence.c
drivers/gpu/drm/nouveau/nv17_fence.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fence.c
drivers/gpu/drm/nouveau/nvif/client.c
drivers/gpu/drm/nouveau/nvif/device.c
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvif/driver.c
drivers/gpu/drm/nouveau/nvif/mem.c
drivers/gpu/drm/nouveau/nvif/mmu.c
drivers/gpu/drm/nouveau/nvif/notify.c
drivers/gpu/drm/nouveau/nvif/object.c
drivers/gpu/drm/nouveau/nvif/user.c
drivers/gpu/drm/nouveau/nvif/vmm.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c [moved from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c with 79% similarity]
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/gr2d.c
drivers/gpu/drm/tegra/gr2d.h
drivers/gpu/drm/tegra/gr3d.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/tegra/plane.h
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/debug.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/job.c
drivers/gpu/host1x/mipi.c
drivers/hid/hid-alps.c
drivers/hid/hid-apple.c
drivers/hid/hid-ids.h
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hv/vmbus_drv.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/amd_energy.c
drivers/hwmon/aspeed-pwm-tacho.c
drivers/hwmon/bt1-pvt.c
drivers/hwmon/drivetemp.c
drivers/hwmon/emc2103.c
drivers/hwmon/max6697.c
drivers/hwmon/nct6775.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/adm1275.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/scmi-hwmon.c
drivers/hwtracing/coresight/coresight-cti.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/intel_th/sth.c
drivers/i2c/Kconfig
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/i2c-designware-common.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-fsi.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/i2c-core-smbus.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/ad7780.c
drivers/iio/adc/adi-axi-adc.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/humidity/hdc100x.c
drivers/iio/humidity/hts221.h
drivers/iio/humidity/hts221_buffer.c
drivers/iio/industrialio-core.c
drivers/iio/magnetometer/ak8974.c
drivers/iio/pressure/ms5611_core.c
drivers/iio/pressure/zpa2326.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/counters.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/iowait.h
drivers/infiniband/hw/hfi1/ipoib.h
drivers/infiniband/hw/hfi1/ipoib_tx.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/qpc.c
drivers/infiniband/hw/qedr/qedr_iw_cm.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/siw/siw_main.c
drivers/infiniband/sw/siw/siw_qp_rx.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/iommu/Kconfig
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/iommu.c
drivers/iommu/arm-smmu-qcom.c
drivers/iommu/hyperv-iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/sun50i-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-riscv-intc.c
drivers/md/dm-ioctl.c
drivers/md/dm-rq.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-metadata.c
drivers/md/dm-zoned-reclaim.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptscsih.c
drivers/mfd/ioc3.c
drivers/misc/atmel-ssc.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
drivers/misc/mei/bus.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/pci-me.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/owl-mmc.c
drivers/mmc/host/sdhci-msm.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/xway_nand.c
drivers/net/bareudp.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/sja1105/sja1105_vl.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/smt.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc_hw.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port.h
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/neterion/vxge/vxge-config.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qede/qede_ptp.h
drivers/net/ethernet/qlogic/qede/qede_rdma.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/geneve.c
drivers/net/ipa/gsi.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_cmd.h
drivers/net/ipa/ipa_data-sdm845.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_gsi.c
drivers/net/ipa/ipa_gsi.h
drivers/net/ipa/ipa_qmi_msg.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/mscc/mscc_macsec.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/smsc.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc95xx.c
drivers/net/vxlan.c
drivers/net/wan/lapbether.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/netlink.c
drivers/net/wireguard/noise.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c
drivers/of/of_mdio.c
drivers/opp/of.c
drivers/pci/controller/vmd.c
drivers/perf/arm-cci.c
drivers/perf/arm-ccn.c
drivers/perf/arm_dsu_pmu.c
drivers/perf/arm_smmuv3_pmu.c
drivers/perf/arm_spe_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/qcom_l2_pmu.c
drivers/perf/qcom_l3_pmu.c
drivers/perf/thunderx2_pmu.c
drivers/perf/xgene_pmu.c
drivers/phy/allwinner/phy-sun4i-usb.c
drivers/phy/intel/phy-intel-combo.c
drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
drivers/phy/ti/phy-am654-serdes.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/pinctrl-amd.h
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/intel_speed_select_if/isst_if_common.h
drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
drivers/platform/x86/thinkpad_acpi.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/da903x-regulator.c [moved from drivers/regulator/da903x.c with 100% similarity]
drivers/regulator/da9063-regulator.c
drivers/regulator/helpers.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/s390/cio/vfio_ccw_chp.c
drivers/s390/net/qeth_core_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_spi.c
drivers/soc/amlogic/meson-gx-socinfo.c
drivers/soc/imx/soc-imx.c
drivers/soc/imx/soc-imx8m.c
drivers/soc/ti/omap_prm.c
drivers/soundwire/intel.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-sun6i.c
drivers/spi/spidev.c
drivers/staging/comedi/drivers/addi_apci_1500.c
drivers/staging/rtl8723bs/core/rtw_wlan_util.c
drivers/staging/wfx/hif_tx.c
drivers/staging/wfx/hif_tx.h
drivers/staging/wfx/queue.c
drivers/staging/wfx/scan.c
drivers/thermal/cpufreq_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
drivers/thermal/mtk_thermal.c
drivers/thermal/qcom/tsens.c
drivers/thermal/rcar_gen3_thermal.c
drivers/thermal/sprd_thermal.c
drivers/thunderbolt/tunnel.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/kgdb_nmi.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/xilinx_uartps.c
drivers/uio/uio_pdrv_genirq.c
drivers/usb/c67x00/c67x00-sched.c
drivers/usb/cdns3/ep0.c
drivers/usb/cdns3/trace.h
drivers/usb/chipidea/core.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/function/f_uac1_legacy.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/gr_udc.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/gadget/usbstring.c
drivers/usb/host/ehci-exynos.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ohci-sm501.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/usbtest.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/serial/ch341.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/cypress_m8.h
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/option.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tcpm/tcpci_rt1711h.c
drivers/vdpa/vdpa.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vhost/test.c
drivers/vhost/test.h
drivers/vhost/vdpa.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/uvesafb.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vmmdev.h
drivers/virtio/virtio_mem.c
drivers/xen/xenbus/xenbus_client.c
fs/afs/cell.c
fs/afs/fs_operation.c
fs/afs/internal.h
fs/afs/write.c
fs/autofs/waitq.c
fs/btrfs/block-group.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/discard.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ref-verify.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.h
fs/cachefiles/rdwr.c
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/misc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/efivarfs/file.c
fs/erofs/zdata.h
fs/exfat/dir.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/recovery.c
fs/gfs2/super.c
fs/io_uring.c
fs/namespace.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nfsd/vfs.c
fs/ocfs2/dlmglue.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/suballoc.c
fs/overlayfs/copy_up.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/proc/proc_sysctl.c
fs/read_write.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/asm-generic/cacheflush.h
include/asm-generic/mmiowb.h
include/crypto/if_alg.h
include/drm/drm_modeset_helper_vtables.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_tt.h
include/linux/atomic-fallback.h
include/linux/bits.h
include/linux/blkdev.h
include/linux/bpf-netns.h
include/linux/bpf.h
include/linux/btf.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/device.h
include/linux/dma-buf.h
include/linux/dma-direct.h
include/linux/dma-mapping.h
include/linux/efi.h
include/linux/filter.h
include/linux/fs.h
include/linux/fs_context.h
include/linux/host1x.h
include/linux/ieee80211.h
include/linux/if_vlan.h
include/linux/input/elan-i2c-ids.h
include/linux/intel-iommu.h
include/linux/irq_work.h
include/linux/kallsyms.h
include/linux/kgdb.h
include/linux/libata.h
include/linux/lsm_hook_defs.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/pci.h
include/linux/phy.h
include/linux/qed/qed_chain.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sched/jobctl.h
include/linux/serial_core.h
include/linux/skmsg.h
include/linux/smp.h
include/linux/smp_types.h [new file with mode: 0644]
include/linux/swap.h
include/linux/syscalls.h
include/linux/task_work.h
include/linux/timekeeping.h
include/linux/tpm_eventlog.h
include/linux/vmalloc.h
include/net/dst.h
include/net/flow_dissector.h
include/net/flow_offload.h
include/net/genetlink.h
include/net/gue.h
include/net/inet_ecn.h
include/net/ip_tunnels.h
include/net/netns/bpf.h
include/net/pkt_sched.h
include/net/sctp/constants.h
include/net/sock.h
include/net/xfrm.h
include/net/xsk_buff_pool.h
include/sound/compress_driver.h
include/sound/dmaengine_pcm.h
include/sound/soc.h
include/trace/events/rxrpc.h
include/uapi/linux/bpf.h
include/uapi/linux/fb.h
include/uapi/linux/idxd.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
include/uapi/linux/mrp_bridge.h
include/uapi/linux/rds.h
include/uapi/linux/spi/spidev.h
include/uapi/linux/vboxguest.h
include/uapi/linux/vfio.h
init/Kconfig
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/devmap.c
kernel/bpf/net_namespace.c
kernel/bpf/reuseport_array.c
kernel/bpf/ringbuf.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/debug/debug_core.c
kernel/debug/gdbstub.c
kernel/debug/kdb/kdb_io.c
kernel/dma/Kconfig
kernel/dma/direct.c
kernel/dma/mapping.c
kernel/dma/pool.c
kernel/dma/remap.c
kernel/fork.c
kernel/irq/manage.c
kernel/kallsyms.c
kernel/kexec_file.c
kernel/kprobes.c
kernel/module.c
kernel/nsproxy.c
kernel/padata.c
kernel/printk/printk.c
kernel/rcu/rcuperf.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/signal.c
kernel/smp.c
kernel/task_work.c
kernel/time/timer.c
kernel/trace/bpf_trace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_boot.c
kernel/trace/trace_events_trigger.c
lib/Kconfig.kasan
lib/Kconfig.kgdb
lib/packing.c
lib/test_hmm.c
mm/cma.c
mm/compaction.c
mm/debug_vm_pgtable.c
mm/filemap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mremap.c
mm/nommu.c
mm/page_alloc.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/swap.c
mm/swap_state.c
mm/vmalloc.c
mm/vmscan.c
mm/workingset.c
net/8021q/vlan_dev.c
net/9p/mod.c
net/bpf/test_run.c
net/bpfilter/bpfilter_kern.c
net/bridge/br_mrp.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/bridge/br_private_mrp.h
net/bridge/netfilter/nft_meta_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/drop_monitor.c
net/core/filter.c
net/core/flow_dissector.c
net/core/flow_offload.c
net/core/skmsg.c
net/core/sock.c
net/core/sock_map.c
net/core/sysctl_net_core.c
net/core/xdp.c
net/dsa/tag_edsa.c
net/ethtool/cabletest.c
net/ethtool/common.c
net/ethtool/ioctl.c
net/ethtool/linkstate.c
net/ethtool/netlink.c
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_main.c
net/hsr/hsr_netlink.c
net/ipv4/Kconfig
net/ipv4/esp4_offload.c
net/ipv4/fib_semantics.c
net/ipv4/fou.c
net/ipv4/icmp.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_flow_table_ipv4.c
net/ipv4/netfilter/nft_dup_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/Kconfig
net/ipv6/esp6_offload.c
net/ipv6/fou6.c
net/ipv6/icmp.c
net/ipv6/ila/ila_main.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_flow_table_ipv6.c
net/ipv6/netfilter/nft_dup_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/route.c
net/ipv6/sit.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/mesh_hwmp.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tx.c
net/mptcp/options.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_dup_netdev.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_inet.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_compat.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dup_netdev.c
net/netfilter/nft_fib_inet.c
net/netfilter/nft_fib_netdev.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_hash.c
net/netfilter/nft_limit.c
net/netfilter/nft_log.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_numgen.c
net/netfilter/nft_objref.c
net/netfilter/nft_osf.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netfilter/nft_redir.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/xt_nat.c
net/netlink/genetlink.c
net/openvswitch/actions.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/rds.h
net/rds/send.c
net/rds/transport.c
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/input.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_gate.c
net/sched/act_mpls.c
net/sched/act_skbedit.c
net/sched/cls_api.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/em_ipset.c
net/sched/em_ipt.c
net/sched/em_meta.c
net/sched/sch_atm.c
net/sched/sch_cake.c
net/sched/sch_dsmark.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_hhf.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/bind_addr.c
net/sctp/protocol.c
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_ism.c
net/smc/smc_ism.h
net/smc/smc_llc.c
net/smc/smc_pnet.c
net/smc/smc_wr.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svcsock.c
net/sunrpc/xdr.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/link.c
net/wireless/nl80211.c
net/xdp/xsk_buff_pool.c
net/xfrm/Kconfig
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
samples/bpf/xdp_monitor_user.c
samples/bpf/xdp_redirect_cpu_user.c
samples/bpf/xdp_rxq_info_user.c
samples/vfs/test-statx.c
scripts/Makefile.extrawarn
scripts/Makefile.lib
scripts/atomic/gen-atomic-fallback.sh
scripts/dtc/checks.c
scripts/dtc/dtc.h
scripts/dtc/flattree.c
scripts/dtc/libfdt/fdt_rw.c
scripts/dtc/libfdt/fdt_sw.c
scripts/dtc/libfdt/libfdt.h
scripts/dtc/treesource.c
scripts/dtc/version_gen.h
scripts/dtc/yamltree.c
scripts/gcc-plugins/Kconfig
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
security/integrity/iint.c
security/integrity/ima/ima.h
security/integrity/ima/ima_crypto.c
security/security.c
sound/core/compress_offload.c
sound/drivers/opl3/opl3_synth.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/renoir/Makefile
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/max98390.c
sound/soc/codecs/rt1015.c
sound/soc/codecs/rt1015.h
sound/soc/codecs/rt5682.c
sound/soc/fsl/fsl_asrc_common.h
sound/soc/fsl/fsl_asrc_dma.c
sound/soc/fsl/fsl_mqs.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/qcom/common.c
sound/soc/qcom/qdsp6/q6afe.c
sound/soc/qcom/qdsp6/q6afe.h
sound/soc/qcom/qdsp6/q6asm.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/soc-core.c
sound/soc/soc-devres.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/sof-pci-dev.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/line6/capture.c
sound/usb/line6/driver.c
sound/usb/line6/playback.c
sound/usb/midi.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/lib/memcpy_64.S
tools/bpf/bpftool/Documentation/bpftool-map.rst
tools/bpf/bpftool/map.c
tools/include/linux/bits.h
tools/include/uapi/linux/bpf.h
tools/lib/bpf/bpf.h
tools/lib/bpf/hashmap.h
tools/lib/bpf/libbpf.c
tools/lib/subcmd/parse-options.c
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/kbuffer.h
tools/objtool/arch.h
tools/objtool/arch/x86/decode.c
tools/objtool/arch/x86/include/arch_elf.h [new file with mode: 0644]
tools/objtool/check.c
tools/objtool/elf.c
tools/objtool/elf.h
tools/objtool/orc_gen.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/scripts/python/flamegraph.py
tools/perf/ui/browsers/hists.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/hashmap.h
tools/perf/util/intel-pt.c
tools/spi/spidev_test.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_parser.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/kunit/test_data/test_insufficient_memory.log [new file with mode: 0644]
tools/testing/selftests/arm64/signal/Makefile
tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
tools/testing/selftests/bpf/progs/bpf_cubic.c
tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
tools/testing/selftests/bpf/progs/fentry_test.c
tools/testing/selftests/bpf/progs/fexit_test.c
tools/testing/selftests/bpf/progs/sockopt_sk.c
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/kselftest.h
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/so_txtime.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/nft_conntrack_helper.sh [new file with mode: 0755]
tools/testing/selftests/pidfd/pidfd.h
tools/testing/selftests/pidfd/pidfd_getfd_test.c
tools/testing/selftests/pidfd/pidfd_setns_test.c
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tpm2/test_smoke.sh
tools/testing/selftests/tpm2/test_space.sh
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/helpers.h [new file with mode: 0644]
tools/testing/selftests/x86/single_step_syscall.c
tools/testing/selftests/x86/syscall_arg_fault.c
tools/testing/selftests/x86/syscall_nt.c
tools/testing/selftests/x86/test_vsyscall.c
tools/testing/selftests/x86/unwind_vdso.c
tools/virtio/linux/kernel.h
tools/virtio/linux/virtio.h
tools/virtio/virtio_test.c
tools/virtio/vringh_test.c
virt/kvm/kvm_main.c

index 87b9dd8..d5f4804 100644 (file)
@@ -143,6 +143,9 @@ x509.genkey
 /allrandom.config
 /allyes.config
 
+# Kconfig savedefconfig output
+/defconfig
+
 # Kdevelop4
 *.kdev4
 
index c69d9c7..6da12df 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -90,11 +90,16 @@ Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
+Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
+Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>
index 1e6c28b..f307506 100644 (file)
@@ -56,11 +56,6 @@ Description: The /dev/kmsg character device node provides userspace access
                  seek after the last record available at the time
                  the last SYSLOG_ACTION_CLEAR was issued.
 
-               Due to the record nature of this interface with a "read all"
-               behavior and the specific positions each seek operation sets,
-               SEEK_CUR is not supported, returning -ESPIPE (invalid seek) to
-               errno whenever requested.
-
                The output format consists of a prefix carrying the syslog
                prefix including priority and facility, the 64 bit message
                sequence number and the monotonic timestamp in microseconds,
index 151c595..f58cfb0 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read.
                Set a_bus_req(A-device bus request) input to be 1 if
@@ -17,7 +17,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read
                The a_bus_drop(A-device bus drop) input is 1 when the
@@ -32,7 +32,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read.
                The b_bus_req(B-device bus request) input is 1 during the time
@@ -47,7 +47,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_clr_err
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Only can be set.
                The a_clr_err(A-device Vbus error clear) input is used to clear
index 5fb5269..5aad534 100644 (file)
@@ -258,7 +258,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.6 available.
+ - Make sure you have at least gcc 4.9 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
index ce3e05e..d09471a 100644 (file)
@@ -1356,8 +1356,8 @@ PAGE_SIZE multiple when read back.
 
          thp_fault_alloc
                Number of transparent hugepages which were allocated to satisfy
-               a page fault, including COW faults. This counter is not present
-               when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+               a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE
+                is not set.
 
          thp_collapse_alloc
                Number of transparent hugepages which were allocated to allow
index ec62fcc..6cf8adc 100644 (file)
@@ -11,6 +11,7 @@ Device Mapper
     dm-clone
     dm-crypt
     dm-dust
+    dm-ebs
     dm-flakey
     dm-init
     dm-integrity
index 6a233e4..b2acd0d 100644 (file)
@@ -305,8 +305,7 @@ monitor how successfully the system is providing huge pages for use.
 
 thp_fault_alloc
        is incremented every time a huge page is successfully
-       allocated to handle a page fault. This applies to both the
-       first time a page is faulted and for COW faults.
+       allocated to handle a page fault.
 
 thp_collapse_alloc
        is incremented by khugepaged when it has found
index 314fa5b..f28853f 100644 (file)
@@ -171,6 +171,7 @@ infrastructure:
 
 
   3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
@@ -181,6 +182,7 @@ infrastructure:
 
 
   4) MIDR_EL1 - Main ID Register
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
index 936cf2a..3f7c3a7 100644 (file)
@@ -147,6 +147,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1463225       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1418040       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1530923       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
index 0d237d4..19d4d15 100644 (file)
@@ -492,13 +492,6 @@ set max_budget to higher values than those to which BFQ would have set
 it with auto-tuning. An alternative way to achieve this goal is to
 just increase the value of timeout_sync, leaving max_budget equal to 0.
 
-weights
--------
-
-Read-only parameter, used to show the weights of the currently active
-BFQ queues.
-
-
 4. Group scheduling with BFQ
 ============================
 
@@ -566,7 +559,7 @@ Parameters to set
 For each group, there is only the following parameter to set.
 
 weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
-group inside its parent. Available values: 1..10000 (default 100). The
+group inside its parent. Available values: 1..1000 (default 100). The
 linear mapping between ioprio and weights, described at the beginning
 of the tunable section, is still valid, but all weights higher than
 IOPRIO_BE_NR*10 are mapped to ioprio 0.
index c47d974..172f957 100644 (file)
@@ -86,6 +86,20 @@ then the next program in the chain (A) will see those changes,
 *not* the original input ``setsockopt`` arguments. The potentially
 modified values will be then passed down to the kernel.
 
+Large optval
+============
+When the ``optval`` is greater than the ``PAGE_SIZE``, the BPF program
+can access only the first ``PAGE_SIZE`` of that data. So it has two options:
+
+* Set ``optlen`` to zero, which indicates that the kernel should
+  use the original buffer from the userspace. Any modifications
+  done by the BPF program to the ``optval`` are ignored.
+* Set ``optlen`` to a value less than ``PAGE_SIZE``, which
+  indicates that the kernel should use BPF's trimmed ``optval``.
+
+When the BPF program returns with an ``optlen`` greater than
+``PAGE_SIZE``, userspace will receive an ``EFAULT`` errno.
+
 Example
 =======
 
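A minimal sketch of the first option above — not part of the merged patch; the
program name, the 4096-byte PAGE_SIZE definition and the license string are
assumptions made only for illustration:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define PAGE_SIZE 4096	/* assumption: 4 KiB pages on the target */

	SEC("cgroup/setsockopt")
	int pass_large_optval(struct bpf_sockopt *ctx)
	{
		/* Buffers larger than PAGE_SIZE are only partially visible to
		 * the program, so hand the original userspace buffer back to
		 * the kernel unmodified by setting optlen to zero.
		 */
		if (ctx->optlen > PAGE_SIZE)
			ctx->optlen = 0;

		return 1;	/* allow the setsockopt() call to proceed */
	}

	char _license[] SEC("license") = "GPL";

Returning 1 with ``optlen`` set to zero spares the program from copying or
validating data it cannot fully see.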
index 2d8d2fe..f416204 100644 (file)
@@ -206,6 +206,14 @@ others should not be larger than the returned value.
 
 ::
 
+       bool
+       dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership.  Returns %false if those calls can be skipped.
+
+::
+
        unsigned long
        dma_get_merge_boundary(struct device *dev);
 
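For illustration only (a hedged sketch, not code from this merge; the function
and variable names are invented for the example), a driver completion path
could use the new helper to skip per-transfer cache maintenance on coherent
platforms:

	#include <linux/dma-mapping.h>

	static void example_rx_complete(struct device *dev, void *cpu_addr,
					dma_addr_t dma_addr, size_t len)
	{
		/* Only pay for the sync when the platform actually needs it. */
		if (dma_need_sync(dev, dma_addr))
			dma_sync_single_for_cpu(dev, dma_addr, len,
						DMA_FROM_DEVICE);

		/* ... consume the received data at cpu_addr ... */
	}

Drivers that issue many small transfers can record the dma_need_sync() result
per mapping and avoid the dma_sync_single_for_{device,cpu} calls entirely when
it returns false.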
index 6068266..7ca8c7b 100644 (file)
@@ -33,7 +33,7 @@ all combinations of get*(), pin*(), FOLL_LONGTERM, and more. Also, the
 pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so
 that's a natural dividing line, and a good point to make separate wrapper calls.
 In other words, use pin_user_pages*() for DMA-pinned pages, and
-get_user_pages*() for other cases. There are four cases described later on in
+get_user_pages*() for other cases. There are five cases described later on in
 this document, to further clarify that concept.
 
 FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However,
index ce4bbd9..b38379f 100644 (file)
@@ -114,12 +114,6 @@ the below options are available:
   To dynamically limit for which functions to generate reports, see the
   `DebugFS interface`_ blacklist/whitelist feature.
 
-  For ``__always_inline`` functions, replace ``__always_inline`` with
-  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
-
-    static __no_kcsan_or_inline void foo(void) {
-        ...
-
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
 
index ea55b24..1628862 100644 (file)
@@ -61,3 +61,43 @@ test, or an end-to-end test.
   kernel by installing a production configuration of the kernel on production
   hardware with a production userspace and then trying to exercise some behavior
   that depends on interactions between the hardware, the kernel, and userspace.
+
+KUnit isn't working, what should I do?
+======================================
+
+Unfortunately, there are a number of things which can break, but here are some
+things to try.
+
+1. Try running ``./tools/testing/kunit/kunit.py run`` with the ``--raw_output``
+   parameter. This might show details or error messages hidden by the kunit_tool
+   parser.
+2. Instead of running ``kunit.py run``, try running ``kunit.py config``,
+   ``kunit.py build``, and ``kunit.py exec`` independently. This can help track
+   down where an issue is occurring. (If you think the parser is at fault, you
+   can run it manually against stdin or a file with ``kunit.py parse``.)
+3. Running the UML kernel directly can often reveal issues or error messages
+   kunit_tool ignores. This should be as simple as running ``./vmlinux`` after
+   building the UML kernel (e.g., by using ``kunit.py build``). Note that UML
+   has some unusual requirements (such as the host having a tmpfs filesystem
+   mounted), and has had issues in the past when built statically and the host
+   has KASLR enabled. (On older host kernels, you may need to run ``setarch
+   `uname -m` -R ./vmlinux`` to disable KASLR.)
+4. Make sure the kernel .config has ``CONFIG_KUNIT=y`` and at least one test
+   (e.g. ``CONFIG_KUNIT_EXAMPLE_TEST=y``). kunit_tool will keep its .config
+   around, so you can see what config was used after running ``kunit.py run``.
+   It also preserves any config changes you might make, so you can
+   enable/disable things with ``make ARCH=um menuconfig`` or similar, and then
+   re-run kunit_tool.
+5. Try to run ``make ARCH=um defconfig`` before running ``kunit.py run``. This
+   may help clean up any residual config items which could be causing problems.
+6. Finally, try running KUnit outside UML. KUnit and KUnit tests can be
+   built into any kernel, or can be built as a module and loaded at runtime.
+   Doing so should allow you to determine if UML is causing the issue you're
+   seeing. When tests are built-in, they will execute when the kernel boots, and
+   modules will automatically execute associated tests when loaded. Test results
+   can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and
+   can be parsed with ``kunit.py parse``. For more details, see "KUnit on
+   non-UML architectures" in :doc:`usage`.
+
+If none of the above tricks help, you are always welcome to email any issues to
+kunit-dev@googlegroups.com.
index a638989..91c4d00 100644 (file)
@@ -2,7 +2,6 @@
 DT_DOC_CHECKER ?= dt-doc-validate
 DT_EXTRACT_EX ?= dt-extract-example
 DT_MK_SCHEMA ?= dt-mk-schema
-DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u)
 
 DT_SCHEMA_MIN_VERSION = 2020.5
 
@@ -35,21 +34,40 @@ quiet_cmd_mk_schema = SCHEMA  $@
 
 DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||')
 
-DT_SCHEMA_FILES ?= $(DT_DOCS)
-
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
-
 override DTC_FLAGS := \
        -Wno-avoid_unnecessary_addr_size \
-       -Wno-graph_child_address
+       -Wno-graph_child_address \
+       -Wno-interrupt_provider
 
 $(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG)
+ifeq ($(DT_SCHEMA_FILES),)
+
+# Unless DT_SCHEMA_FILES is specified, use the full schema for dtbs_check too.
+# Just copy processed-schema-examples.yaml
+
+$(obj)/processed-schema.yaml: $(obj)/processed-schema-examples.yaml FORCE
+       $(call if_changed,copy)
+
+DT_SCHEMA_FILES = $(DT_DOCS)
+
+else
+
+# If DT_SCHEMA_FILES is specified, use it for processed-schema.yaml
+
+$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u
 $(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-extra-y += processed-schema.yaml
+endif
+
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
+extra-$(CHECK_DTBS) += processed-schema.yaml
+
+# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
+# build artifacts here before they are processed by scripts/Makefile.clean
+clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \
+                       -name '*.example.dt.yaml' \) -delete 2>/dev/null)
index 7150474..10b8459 100644 (file)
@@ -47,7 +47,7 @@ Required properties:
                          &lsio_mu1 1 2
                          &lsio_mu1 1 3
                          &lsio_mu1 3 3>;
-               See Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+               See Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
                for detailed mailbox binding.
 
 Note: Each mu which supports general interrupt should have an alias correctly
index c4c9119..a0c6c5d 100644 (file)
@@ -80,14 +80,14 @@ examples:
         ranges = <1 0x00000000 0x42000000 0x02000000>,
                  <5 0x00000000 0x46000000 0x01000000>;
 
-        ethernet@1,01f00000 {
+        ethernet@1,1f00000 {
             compatible = "smsc,lan9115";
             reg = <1 0x01f00000 0x1000>;
             interrupts = <0 48 4>;
             phy-mode = "mii";
         };
 
-        uart@5,00200000 {
+        serial@5,200000 {
             compatible = "ns16550a";
             reg = <5 0x00200000 0x20>;
             interrupts = <0 49 4>;
index b5f3ed0..a753654 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX27
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 1b6f75d..a25a374 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX31
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index f5c2b3d..4d9e7c7 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX5
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 6e14e08..0d1db3f 100644 (file)
@@ -37,7 +37,7 @@ Optional properties:
        simple-card or audio-graph-card binding. See their binding
        documents on how to describe the way the sii902x device is
        connected to the rest of the audio system:
-       Documentation/devicetree/bindings/sound/simple-card.txt
+       Documentation/devicetree/bindings/sound/simple-card.yaml
        Documentation/devicetree/bindings/sound/audio-graph-card.txt
        Note: In case of the audio-graph-card binding the used port
        index should be 3.
index 5bf77f6..5a99490 100644 (file)
@@ -68,7 +68,7 @@ Required properties:
   datasheet
 - clocks : phandle to the PRE axi clock input, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "axi"
 - interrupts: should contain the PRE interrupt
 - fsl,iram: phandle pointing to the mmio-sram device node, that should be
@@ -94,7 +94,7 @@ Required properties:
   datasheet
 - clocks : phandles to the PRG ipg and axi clock inputs, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "ipg" and "axi"
 - fsl,pres: phandles to the PRE units attached to this PRG, with the fixed
   PRE as the first entry and the muxable PREs following.
index 38c637f..8e6e7d7 100644 (file)
@@ -30,8 +30,8 @@ Required properties:
                 "di2_sel" - IPU2 DI0 mux
                 "di3_sel" - IPU2 DI1 mux
         The needed clock numbers for each are documented in
-        Documentation/devicetree/bindings/clock/imx5-clock.txt, and in
-        Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+        Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in
+        Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 
 Optional properties:
  - pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
index af95586..7884fd7 100644 (file)
@@ -87,6 +87,7 @@ Required properties:
   * "qcom,dsi-phy-20nm"
   * "qcom,dsi-phy-28nm-8960"
   * "qcom,dsi-phy-14nm"
+  * "qcom,dsi-phy-14nm-660"
   * "qcom,dsi-phy-10nm"
   * "qcom,dsi-phy-10nm-8998"
 - reg: Physical base address and length of the registers of PLL, PHY. Some
index fd779cd..1af0ff1 100644 (file)
@@ -112,6 +112,34 @@ Example a6xx (with GMU):
                interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
                interconnect-names = "gfx-mem";
 
+               gpu_opp_table: opp-table {
+                       compatible = "operating-points-v2";
+
+                       opp-430000000 {
+                               opp-hz = /bits/ 64 <430000000>;
+                               opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+                               opp-peak-kBps = <5412000>;
+                       };
+
+                       opp-355000000 {
+                               opp-hz = /bits/ 64 <355000000>;
+                               opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+                               opp-peak-kBps = <3072000>;
+                       };
+
+                       opp-267000000 {
+                               opp-hz = /bits/ 64 <267000000>;
+                               opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+                               opp-peak-kBps = <3072000>;
+                       };
+
+                       opp-180000000 {
+                               opp-hz = /bits/ 64 <180000000>;
+                               opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+                               opp-peak-kBps = <1804000>;
+                       };
+               };
+
                qcom,gmu = <&gmu>;
 
                zap-shader {
index 41fd571..be69e0c 100644 (file)
@@ -33,7 +33,7 @@ additionalProperties: false
 
 examples:
   - |
-    sysreg {
+    sysreg@0 {
         compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
         reg = <0x00000 0x1000>;
 
index ec8ae74..7204da5 100644 (file)
@@ -24,7 +24,7 @@ properties:
     description: |
       Should contain a list of phandles pointing to display interface port
       of vop devices. vop definitions as defined in
-      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
 
 required:
   - compatible
index ba45558..e1c49b6 100644 (file)
@@ -12,7 +12,7 @@ Required properties for the top level node:
    Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
 - #interrupt-cells : Specifies the number of cells needed to encode an
    interrupt. Should be 2. The first cell defines the interrupt number,
-   the second encodes the triger flags encoded as described in
+   the second encodes the trigger flags encoded as described in
    Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 - compatible:
   - "mediatek,mt7621-gpio" for Mediatek controllers
index e134053..e6bbcae 100644 (file)
@@ -10,7 +10,7 @@ Interrupt number definition:
  16-31  : private  irq, and we use 16 as the co-processor timer.
  31-1024: common irq for soc ip.
 
-Interrupt triger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
+Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
  IRQ_TYPE_LEVEL_HIGH (default)
  IRQ_TYPE_LEVEL_LOW
  IRQ_TYPE_EDGE_RISING
index 4438432..ad76edc 100644 (file)
@@ -87,7 +87,7 @@ Example:
                ranges;
 
                /* APU<->RPU0 IPI mailbox controller */
-               ipi_mailbox_rpu0: mailbox@ff90400 {
+               ipi_mailbox_rpu0: mailbox@ff990400 {
                        reg = <0xff990400 0x20>,
                              <0xff990420 0x20>,
                              <0xff990080 0x20>,
index 8c4d649..2d7cdf1 100644 (file)
@@ -8,7 +8,7 @@ The embedded controller requires the SPI controller driver to signal readiness
 to receive a transfer (that is, when TX FIFO contains the response data) by
 strobing the ACK pin with the ready signal. See the "ready-gpios" property of the
 SSP binding as documented in:
-<Documentation/devicetree/bindings/spi/spi-pxa2xx.txt>.
+<Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml>.
 
 Example:
        &ssp3 {
index 219bcbd..9ef5bac 100644 (file)
@@ -3,7 +3,7 @@ MediaTek SoC built-in Bluetooth Devices
 
 This device is a serial attached device to BTIF device and thus it must be a
 child node of the serial node with BTIF. The dt-bindings details for BTIF
-device can be known via Documentation/devicetree/bindings/serial/8250.txt.
+device can be known via Documentation/devicetree/bindings/serial/8250.yaml.
 
 Required properties:
 
index b686131..1b8e8b4 100644 (file)
@@ -114,7 +114,7 @@ with values derived from the SoC user manual.
    [flags]>
 
 On other mach-shmobile platforms GPIO is handled by the gpio-rcar driver.
-Please refer to Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+Please refer to Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
 for documentation of the GPIO device tree bindings on those platforms.
 
 
index 2696826..d5f6919 100644 (file)
@@ -5,7 +5,7 @@ It is based on common bindings for device graphs.
 see ${LINUX}/Documentation/devicetree/bindings/graph.txt
 
 Basically, Audio Graph Card property is same as Simple Card.
-see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.yaml
 
 Below are same as Simple-Card.
 
index 4d51f3f..a6ffcde 100644 (file)
@@ -5,7 +5,7 @@ codec or external codecs.
 
 sti sound drivers allows to expose sti SoC audio interface through the
 generic ASoC simple card. For details about sound card declaration please refer to
-Documentation/devicetree/bindings/sound/simple-card.txt.
+Documentation/devicetree/bindings/sound/simple-card.yaml.
 
 1) sti-uniperiph-dai: audio dai device.
 ---------------------------------------
index 9147df2..38efb50 100644 (file)
@@ -34,12 +34,15 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: controller register bus clock
+      - description: baud rate generator and delay control clock
 
   clock-names:
-    description: input clock for the baud rate generator
-    items:
-      - const: core
+    minItems: 1
+    maxItems: 2
 
 if:
   properties:
@@ -51,17 +54,22 @@ if:
 then:
   properties:
     clocks:
-      contains:
-        items:
-          - description: controller register bus clock
-          - description: baud rate generator and delay control clock
+      minItems: 2
 
     clock-names:
-      minItems: 2
       items:
         - const: core
         - const: pclk
 
+else:
+  properties:
+    clocks:
+      maxItems: 1
+
+    clock-names:
+      items:
+        - const: core
+
 required:
   - compatible
   - reg
index 790311a..c8c1e91 100644 (file)
@@ -19,7 +19,7 @@ Required properties:
 
 SPI Controller nodes must be child of GENI based Qualcomm Universal
 Peripharal. Please refer GENI based QUP wrapper controller node bindings
-described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
+described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml.
 
 SPI slave nodes must be children of the SPI master node and conform to SPI bus
 binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
index fcd25a0..727d045 100644 (file)
@@ -41,7 +41,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc: soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index b8515d3..3ec9cc8 100644 (file)
@@ -224,7 +224,7 @@ examples:
     #include <dt-bindings/thermal/thermal.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index 25b9209..ea14de8 100644 (file)
@@ -35,7 +35,7 @@ examples:
     #include <dt-bindings/soc/ti,sci_pm_domain.h>
     vtm: thermal@42050000 {
         compatible = "ti,am654-vtm";
-        reg = <0x0 0x42050000 0x0 0x25c>;
+        reg = <0x42050000 0x25c>;
         power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
         #thermal-sensor-cells = <1>;
     };
index 15cfec0..f5c7e99 100644 (file)
@@ -8,7 +8,7 @@ regs is accessed by cpu co-processor 4 registers with mtcr/mfcr.
  - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
  - PTIM_TSR  "cr<1, 14>" Interrupt cleanup status reg.
  - PTIM_CCVR "cr<3, 14>" Current counter value reg.
- - PTIM_LVR  "cr<6, 14>" Window value reg to triger next event.
+ - PTIM_LVR  "cr<6, 14>" Window value reg to trigger next event.
 
 ==============================
 timer node bindings definition
index e4e83d3..8b019ac 100644 (file)
@@ -127,8 +127,8 @@ examples:
                 #address-cells = <1>;
                 #size-cells = <0>;
 
-                string@0409 {
-                        reg = <0x0409>;
+                string@409 {
+                        reg = <0x409>;
                         manufacturer = "ASPEED";
                         product = "USB Virtual Hub";
                         serial-number = "0000";
index 220cf46..8c74a99 100644 (file)
@@ -1,4 +1,4 @@
-:orphan:
+.. SPDX-License-Identifier: GPL-2.0
 
 Writing DeviceTree Bindings in json-schema
 ==========================================
@@ -124,9 +124,12 @@ dtc must also be built with YAML output support enabled. This requires that
 libyaml and its headers be installed on the host system. For some distributions
 that involves installing the development package, such as:
 
-Debian:
+Debian::
+
   apt-get install libyaml-dev
-Fedora:
+
+Fedora::
+
   dnf -y install libyaml-devel
 
 Running checks
index 660dbaf..fcda5d6 100644 (file)
@@ -560,8 +560,8 @@ When the NFS export feature is enabled, all directory index entries are
 verified on mount time to check that upper file handles are not stale.
 This verification may cause significant overhead in some cases.
 
-Note: the mount options index=off,nfs_export=on are conflicting and will
-result in an error.
+Note: the mount options index=off,nfs_export=on are conflicting for a
+read-write mount and will result in an error.
 
 
 Testsuite
index 0b8cd83..38d951f 100644 (file)
@@ -1,14 +1,26 @@
 ==============================
-Linux I2C slave eeprom backend
+Linux I2C slave EEPROM backend
 ==============================
 
-by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-20
 
-This is a proof-of-concept backend which acts like an EEPROM on the connected
-I2C bus. The memory contents can be modified from userspace via this file
-located in sysfs::
+This backend simulates an EEPROM on the connected I2C bus. Its memory contents
+can be accessed from userspace via this file located in sysfs::
 
        /sys/bus/i2c/devices/<device-directory>/slave-eeprom
 
+The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only
+variants are also supported. The name needed for instantiating has the form
+'slave-<type>[ro]'. Examples follow:
+
+24c02, read/write, address 0x64:
+  # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
+
+24c512, read-only, address 0x42:
+  # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device
+
+You can also preload data during boot if a device-property named
+'firmware-name' contains a valid filename (DT or ACPI only).
+
 As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
 notification when another master changed the content.
index a45cccf..85ccc87 100644 (file)
@@ -182,7 +182,8 @@ module 8123.ko, which is built from the following files::
        8123_pci.c
        8123_bin.o_shipped      <= Binary blob
 
---- 3.1 Shared Makefile
+3.1 Shared Makefile
+-------------------
 
        An external module always includes a wrapper makefile that
        supports building the module using "make" with no arguments.
@@ -470,9 +471,9 @@ build.
 
        The syntax of the Module.symvers file is::
 
-       <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
+               <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
 
-       0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
+               0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
 
        The fields are separated by tabs and values may be empty (e.g.
        if no namespace is defined for an exported symbol).
index 5033938..3b25655 100644 (file)
@@ -101,7 +101,7 @@ Structure randomisation
 
 If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
 pre-generate the random seed in
-``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value
+``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
 is used in rebuilds.
 
 Debug info conflicts
index c5a646b..2b75760 100644 (file)
@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these
 drivers access their registers through the same regmap.
 
 For more information regarding the devicetree bindings of the TCU drivers,
-have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
+have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
index e93d982..82fce60 100644 (file)
@@ -434,7 +434,7 @@ can set up your network then:
        ifconfig arc0 insight
        route add insight arc0
        route add freedom arc0  /* I would use the subnet here (like I said
-                                       to to in "single protocol" above),
+                                       to in "single protocol" above),
                                        but the rest of the subnet
                                        unfortunately lies across the PPP
                                        link on freedom, which confuses
index 824afd7..f060cfb 100644 (file)
@@ -6,7 +6,7 @@ AX.25
 
 To use the amateur radio protocols within Linux you will need to get a
 suitable copy of the AX.25 Utilities. More detailed information about
-AX.25, NET/ROM and ROSE, associated programs and and utilities can be
+AX.25, NET/ROM and ROSE, associated programs and utilities can be
 found on http://www.linux-ax25.org.
 
 There is an active mailing list for discussing Linux amateur radio matters
index 4cef88d..638ac1e 100644 (file)
@@ -144,7 +144,7 @@ UCAN_COMMAND_SET_BITTIMING
 
 *Host2Dev; mandatory*
 
-Setup bittiming by sending the the structure
+Setup bittiming by sending the structure
 ``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
 details)
 
@@ -232,7 +232,7 @@ UCAN_IN_TX_COMPLETE
   zero
 
 The CAN device has sent a message to the CAN bus. It answers with a
-list of of tuples <echo-ids, flags>.
+list of tuples <echo-ids, flags>.
 
 The echo-id identifies the frame from (echos the id from a previous
 UCAN_OUT_TX message). The flag indicates the result of the
index 563d56c..a8d15dd 100644 (file)
@@ -95,7 +95,7 @@ Ethernet switch.
 Networking stack hooks
 ----------------------
 
-When a master netdev is used with DSA, a small hook is placed in in the
+When a master netdev is used with DSA, a small hook is placed in the
 networking stack is in order to have the DSA subsystem process the Ethernet
 switch specific tagging protocol. DSA accomplishes this by registering a
 specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
index 36ca823..6f4bf84 100644 (file)
@@ -30,8 +30,8 @@ Socket API
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in the userspace package (see either http://wpan.cakelab.org/ or the
-git tree at https://github.com/linux-wpan/wpan-tools).
+in the userspace package (see either https://linux-wpan.org/wpan-tools.html
+or the git tree at https://github.com/linux-wpan/wpan-tools).
 
 6LoWPAN Linux implementation
 ============================
index b72f89d..837d51f 100644 (file)
@@ -741,7 +741,7 @@ tcp_fastopen - INTEGER
 
        Default: 0x1
 
-       Note that that additional client or server features are only
+       Note that additional client or server features are only
        effective if the basic support (0x1 and 0x2) are enabled respectively.
 
 tcp_fastopen_blackhole_timeout_sec - INTEGER
index be36c46..2afccc6 100644 (file)
@@ -114,7 +114,7 @@ drop_entry - INTEGER
        modes (when there is no enough available memory, the strategy
        is enabled and the variable is automatically set to 2,
        otherwise the strategy is disabled and the variable is set to
-       1), and 3 means that that the strategy is always enabled.
+       1), and 3 means that the strategy is always enabled.
 
 drop_packet - INTEGER
        - 0  - disabled (default)
index 68552b9..39c2249 100644 (file)
@@ -186,7 +186,7 @@ About the AF_RXRPC driver:
      time [tunable] after the last connection using it discarded, in case a new
      connection is made that could use it.
 
- (#) A client-side connection is only shared between calls if they have have
+ (#) A client-side connection is only shared between calls if they have
      the same key struct describing their security (and assuming the calls
      would otherwise share the connection).  Non-secured calls would also be
      able to share connections with each other.
index 1217c2f..788dc83 100644 (file)
@@ -213,7 +213,7 @@ request buffers are not in memory. The operating system handles the fault by
 updating CSB with the following data:
 
        csb.flags = CSB_V;
-       csb.cc = CSB_CC_TRANSLATION;
+       csb.cc = CSB_CC_FAULT_ADDRESS;
        csb.ce = CSB_CE_TERMINATION;
        csb.address = fault_address;
 
index 5cfb54c..8f68e72 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.8              gcc --version
+GNU C                  4.9              gcc --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
index 2657a55..1bee6f8 100644 (file)
@@ -319,6 +319,26 @@ If you are afraid to mix up your local variable names, you have another
 problem, which is called the function-growth-hormone-imbalance syndrome.
 See chapter 6 (Functions).
 
+For symbol names and documentation, avoid introducing new usage of
+'master / slave' (or 'slave' independent of 'master') and 'blacklist /
+whitelist'.
+
+Recommended replacements for 'master / slave' are:
+    '{primary,main} / {secondary,replica,subordinate}'
+    '{initiator,requester} / {target,responder}'
+    '{controller,host} / {device,worker,proxy}'
+    'leader / follower'
+    'director / performer'
+
+Recommended replacements for 'blacklist/whitelist' are:
+    'denylist / allowlist'
+    'blocklist / passlist'
+
+Exceptions for introducing new usage are to maintain a userspace ABI/API,
+or when updating code for an existing (as of 2020) hardware or protocol
+specification that mandates those terms. For new specifications
+translate specification usage of the terminology to the kernel coding
+standard where possible.
 
 5) Typedefs
 -----------
index 426f945..320788f 100644 (file)
@@ -4339,14 +4339,15 @@ Errors:
 #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
 
   struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
   };
 
   struct kvm_vmx_nested_state_data {
index c59b4fa..2c669c0 100644 (file)
@@ -2924,6 +2924,7 @@ F:        include/uapi/linux/atm*
 
 ATMEL MACB ETHERNET DRIVER
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
 S:     Supported
 F:     drivers/net/ethernet/cadence/
 
@@ -3301,7 +3302,7 @@ X:        arch/riscv/net/bpf_jit_comp32.c
 
 BPF JIT for S390
 M:     Ilya Leoshkevich <iii@linux.ibm.com>
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
@@ -3941,7 +3942,7 @@ L:        linux-crypto@vger.kernel.org
 S:     Supported
 F:     drivers/char/hw_random/cctrng.c
 F:     drivers/char/hw_random/cctrng.h
-F:     Documentation/devicetree/bindings/rng/arm-cctrng.txt
+F:     Documentation/devicetree/bindings/rng/arm-cctrng.yaml
 W:     https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
 
 CEC FRAMEWORK
@@ -5016,7 +5017,6 @@ F:        drivers/mfd/da91??-*.c
 F:     drivers/pinctrl/pinctrl-da90??.c
 F:     drivers/power/supply/da9052-battery.c
 F:     drivers/power/supply/da91??-*.c
-F:     drivers/regulator/da903x.c
 F:     drivers/regulator/da9???-regulator.[ch]
 F:     drivers/regulator/slg51000-regulator.[ch]
 F:     drivers/rtc/rtc-da90??.c
@@ -5107,7 +5107,7 @@ M:        Vinod Koul <vkoul@kernel.org>
 L:     dmaengine@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
-T:     git git://git.infradead.org/users/vkoul/slave-dma.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
 F:     Documentation/devicetree/bindings/dma/
 F:     Documentation/driver-api/dmaengine/
 F:     drivers/dma/
@@ -5486,7 +5486,7 @@ F:        include/uapi/drm/r128_drm.h
 DRM DRIVER FOR RAYDIUM RM67191 PANELS
 M:     Robert Chiras <robert.chiras@nxp.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
+F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
 F:     drivers/gpu/drm/panel/panel-raydium-rm67191.c
 
 DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
@@ -8338,7 +8338,7 @@ M:        Alexander Aring <alex.aring@gmail.com>
 M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
-W:     http://wpan.cakelab.org/
+W:     https://linux-wpan.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
 F:     Documentation/networking/ieee802154.rst
@@ -10813,7 +10813,7 @@ F:      Documentation/devicetree/bindings/dma/mtk-*
 F:     drivers/dma/mediatek/
 
 MEDIATEK ETHERNET DRIVER
-M:     Felix Fietkau <nbd@openwrt.org>
+M:     Felix Fietkau <nbd@nbd.name>
 M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Mark Lee <Mark-MC.Lee@mediatek.com>
@@ -12700,13 +12700,13 @@ F:    arch/mips/boot/dts/ralink/omega2p.dts
 
 OP-TEE DRIVER
 M:     Jens Wiklander <jens.wiklander@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/tee/optee/
 
 OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:     Sumit Garg <sumit.garg@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/char/hw_random/optee-rng.c
 
@@ -14579,8 +14579,8 @@ RENESAS R-CAR THERMAL DRIVERS
 M:     Niklas Söderlund <niklas.soderlund@ragnatech.se>
 L:     linux-renesas-soc@vger.kernel.org
 S:     Supported
-F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
-F:     Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+F:     Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
 F:     drivers/thermal/rcar_gen3_thermal.c
 F:     drivers/thermal/rcar_thermal.c
 
@@ -14836,7 +14836,7 @@ S:      Maintained
 F:     drivers/video/fbdev/savage/
 
 S390
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
 L:     linux-s390@vger.kernel.org
@@ -14867,7 +14867,7 @@ F:      drivers/s390/block/dasd*
 F:     include/linux/dasd_mod.h
 
 S390 IOMMU (PCI)
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -14895,7 +14895,7 @@ F:      drivers/s390/net/
 
 S390 PCI SUBSYSTEM
 M:     Niklas Schnelle <schnelle@linux.ibm.com>
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -16063,8 +16063,10 @@ SPARSE CHECKER
 M:     "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
 L:     linux-sparse@vger.kernel.org
 S:     Maintained
-W:     https://sparse.wiki.kernel.org/
+W:     https://sparse.docs.kernel.org/
 T:     git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+Q:     https://patchwork.kernel.org/project/linux-sparse/list/
+B:     https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools
 F:     include/linux/compiler.h
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -16777,7 +16779,7 @@ F:      include/media/i2c/tw9910.h
 
 TEE SUBSYSTEM
 M:     Jens Wiklander <jens.wiklander@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     Documentation/tee.txt
 F:     drivers/tee/
@@ -17516,7 +17518,7 @@ F:      Documentation/admin-guide/ufs.rst
 F:     fs/ufs/
 
 UHID USERSPACE HID IO DRIVER
-M:     David Herrmann <dh.herrmann@googlemail.com>
+M:     David Rheinsberg <david.rheinsberg@gmail.com>
 L:     linux-input@vger.kernel.org
 S:     Maintained
 F:     drivers/hid/uhid.c
@@ -18475,7 +18477,7 @@ S:      Maintained
 F:     drivers/rtc/rtc-sd3078.c
 
 WIIMOTE HID DRIVER
-M:     David Herrmann <dh.herrmann@googlemail.com>
+M:     David Rheinsberg <david.rheinsberg@gmail.com>
 L:     linux-input@vger.kernel.org
 S:     Maintained
 F:     drivers/hid/hid-wiimote*
index ac2c61c..249a51d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -970,8 +970,8 @@ LDFLAGS_vmlinux     += --pack-dyn-relocs=relr
 endif
 
 # Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS  += $(filter -m32 -m64, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
 
 # make the checker run with the right architecture
 CHECKFLAGS += --arch=$(ARCH)
index fddc700..197896c 100644 (file)
@@ -170,6 +170,15 @@ config ARC_CPU_HS
 
 endchoice
 
+config ARC_TUNE_MCPU
+       string "Override default -mcpu compiler flag"
+       default ""
+       help
+         Override default -mcpu=xxx compiler flag (which is set depending on
+         the ISA version) with the specified value.
+         NOTE: If specified flag isn't supported by current compiler the
+         ISA default value will be used as a fallback.
+
 config CPU_BIG_ENDIAN
        bool "Enable Big Endian Mode"
        help
@@ -465,6 +474,12 @@ config ARC_IRQ_NO_AUTOSAVE
          This is programmable and can be optionally disabled in which case
          software INTERRUPT_PROLOGUE/EPILGUE do the needed work
 
+config ARC_LPB_DISABLE
+       bool "Disable loop buffer (LPB)"
+       help
+         On HS cores, loop buffer (LPB) is programmable in runtime and can
+         be optionally disabled.
+
 endif # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
index 20e9ab6..d00f8b8 100644 (file)
@@ -10,8 +10,25 @@ CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
 endif
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
-cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT)  := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2)      := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+cflags-y                               += $(tune-mcpu-def-y)
+else
+tune-mcpu                              := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu-ok                           := $(call cc-option-yn, $(tune-mcpu))
+ifeq ($(tune-mcpu-ok),y)
+cflags-y                               += $(tune-mcpu)
+else
+# The flag provided by 'CONFIG_ARC_TUNE_MCPU' option isn't known by this compiler
+# (probably the compiler is too old). Use ISA default mcpu flag instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y                               += $(tune-mcpu-def-y)
+endif
+endif
+
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
index c77a0e3..0284ace 100644 (file)
@@ -19,7 +19,7 @@
 #define  R_ARC_32_PCREL                0x31
 
 /*to set parameters in the core dumps */
-#define ELF_ARCH               EM_ARCOMPACT
+#define ELF_ARCH               EM_ARC_INUSE
 #define ELF_CLASS              ELFCLASS32
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
index 7fc73fe..863d63a 100644 (file)
@@ -90,6 +90,9 @@ static inline void arch_local_irq_restore(unsigned long flags)
 /*
  * Unconditionally Enable IRQs
  */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
 static inline void arch_local_irq_enable(void)
 {
        unsigned long temp;
@@ -102,7 +105,7 @@ static inline void arch_local_irq_enable(void)
        : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
        : "cc", "memory");
 }
-
+#endif
 
 /*
  * Unconditionally Disable IRQs
index 60406ec..ea00c8a 100644 (file)
@@ -165,7 +165,6 @@ END(EV_Extension)
 tracesys:
        ; save EFA in case tracer wants the PC of traced task
        ; using ERET won't work since next-PC has already committed
-       lr  r12, [efa]
        GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
        st  r12, [r11, THREAD_FAULT_ADDR]       ; thread.fault_address
 
@@ -208,15 +207,9 @@ tracesys_exit:
 ; Breakpoint TRAP
 ; ---------------------------------------------
 trap_with_param:
-
-       ; stop_pc info by gdb needs this info
-       lr  r0, [efa]
+       mov r0, r12     ; EFA in case ptracer/gdb wants stop_pc
        mov r1, sp
 
-       ; Now that we have read EFA, it is safe to do "fake" rtie
-       ;   and get out of CPU exception mode
-       FAKE_RET_FROM_EXCPN
-
        ; Save callee regs in case gdb wants to have a look
        ; SP will grow up by size of CALLEE Reg-File
        ; NOTE: clobbers r12
@@ -243,6 +236,10 @@ ENTRY(EV_Trap)
 
        EXCEPTION_PROLOGUE
 
+       lr  r12, [efa]
+
+       FAKE_RET_FROM_EXCPN
+
        ;============ TRAP 1   :breakpoints
        ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
        bmsk.f 0, r10, 7
@@ -250,9 +247,6 @@ ENTRY(EV_Trap)
 
        ;============ TRAP  (no param): syscall top level
 
-       ; First return from Exception to pure K mode (Exception/IRQs renabled)
-       FAKE_RET_FROM_EXCPN
-
        ; If syscall tracing ongoing, invoke pre-post-hooks
        GET_CURR_THR_INFO_FLAGS   r10
        btst r10, TIF_SYSCALL_TRACE
index 6eb23f1..17fd1ed 100644 (file)
        bclr    r5, r5, STATUS_AD_BIT
 #endif
        kflag   r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+       lr      r5, [ARC_REG_LPB_BUILD]
+       breq    r5, 0, 1f               ; LPB doesn't exist
+       mov     r5, 1
+       sr      r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
 #endif
        ; Config DSP_CTRL properly, so kernel may use integer multiply,
        ; multiply-accumulate, and divide operations
index dad8a65..41f07b3 100644 (file)
@@ -58,10 +58,12 @@ static const struct id_to_str arc_legacy_rel[] = {
        { 0x00,         NULL   }
 };
 
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
        /* UARCH.MAJOR, Release */
        {  0,           "R3.10a"},
        {  1,           "R3.50a"},
+       {  2,           "R3.60a"},
+       {  3,           "R4.00a"},
        {  0xFF,        NULL   }
 };
 
@@ -117,12 +119,6 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        struct bcr_uarch_build_arcv2 uarch;
        const struct id_to_str *tbl;
 
-       /*
-        * Up until (including) the first core4 release (0x54) things were
-        * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
-        * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
-        */
-
        if (cpu->core.family < 0x54) { /* includes arc700 */
 
                for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
@@ -143,11 +139,10 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        }
 
        /*
-        * However the subsequent HS release (same 0x54) allow HS38 or HS48
-        * configurations and encode this info in a different BCR.
-        * The BCR was introduced in 0x54 so can't be read unconditionally.
+        * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+        * ARCVER 0x54, which introduced AUX MICRO_ARCH_BUILD; subsequent
+        * releases only update the latter.
         */
-
        READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
 
        if (uarch.prod == 4) {
@@ -158,7 +153,7 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
                cpu->name = "HS38";
        }
 
-       for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+       for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
                if (uarch.maj == tbl->id) {
                        cpu->release = tbl->str;
                        break;
index 05e7b5d..04f0b12 100644 (file)
 &mmc2 {
        status = "okay";
        vmmc-supply = <&wl12xx_vmmc>;
-       ti,non-removable;
+       non-removable;
        bus-width = <4>;
        cap-power-off-card;
        pinctrl-names = "default";
index 91f93bc..dd93222 100644 (file)
@@ -22,6 +22,7 @@
        pinctrl-0 = <&emmc_pins>;
        bus-width = <8>;
        status = "okay";
+       non-removable;
 };
 
 &am33xx_pinmux {
index 3124d94..e07dd79 100644 (file)
@@ -75,7 +75,6 @@
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 5811fb8..83f9452 100644 (file)
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 4092cd1..609c8db 100644 (file)
@@ -75,7 +75,6 @@
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 68252da..a4fc6b1 100644 (file)
        bus-width = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wlan_pins>;
-       ti,non-removable;
-       ti,needs-special-hs-handling;
+       non-removable;
        cap-power-off-card;
        keep-power-in-suspend;
 
index 32f515a..78b6e1f 100644 (file)
 &mmc2 {
        status = "okay";
        vmmc-supply = <&wl12xx_vmmc>;
-       ti,non-removable;
+       non-removable;
        bus-width = <4>;
        cap-power-off-card;
        keep-power-in-suspend;
index fef5828..dbedf72 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 6495a12..4e90f9c 100644 (file)
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
        pinctrl-0 = <&mmc1_pins_default>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 244df9c..f03e72c 100644 (file)
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
        pinctrl-0 = <&mmc2_pins_default>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 6d7608d..f9a027b 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&ldo3_reg>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
 };
 
 &mmc3 {
        pinctrl-0 = <&wireless_pins>;
        vmmmc-supply = <&v3v3c_reg>;
        bus-width = <4>;
-       ti,non-removable;
+       non-removable;
        dmas = <&edma_xbar 12 0 1
                &edma_xbar 13 0 2>;
        dma-names = "tx", "rx";
index 3d0672b..7e46b4c 100644 (file)
@@ -69,7 +69,7 @@
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmc_reg>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
        status = "disabled";
 };
 
index 4da7190..f0b2222 100644 (file)
@@ -88,7 +88,6 @@
                        AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
                        AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
                        AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
-                       AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4)           /* (B12) mcasp0_aclkr.mmc0_sdwp */
                >;
        };
 
index 7ff11d6..a9cbefc 100644 (file)
                        ranges = <0x0 0x60000 0x1000>;
 
                        mmc1: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
-                               ti,dual-volt;
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
-                               ti,needs-special-hs-handling;
                                dmas = <&edma_xbar 24 0 0
                                        &edma_xbar 25 0 0>;
                                dma-names = "tx", "rx";
                        ranges = <0x0 0xd8000 0x1000>;
 
                        mmc2: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
                                dmas = <&edma 2 0
                                        &edma 3 0>;
index 3b177c9..5fdce10 100644 (file)
                        ranges = <0x0 0x47810000 0x1000>;
 
                        mmc3: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
                                interrupts = <29>;
                                reg = <0x0 0x1000>;
+                               status = "disabled";
                        };
                };
 
                              <0x47400010 0x4>;
                        reg-names = "rev", "sysc";
                        ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
-                                        SYSC_OMAP2_SOFTRESET)>;
+                                        SYSC_OMAP4_SOFTRESET)>;
                        ti,sysc-midle = <SYSC_IDLE_FORCE>,
                                        <SYSC_IDLE_NO>,
                                        <SYSC_IDLE_SMART>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       ranges = <0x0 0x47400000 0x5000>;
+                       ranges = <0x0 0x47400000 0x8000>;
 
                        usb0_phy: usb-phy@1300 {
                                compatible = "ti,am335x-usb-phy";
index b4861f7..51ad9e8 100644 (file)
                        ranges = <0x0 0x47810000 0x1000>;
 
                        mmc3: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                ti,needs-special-reset;
                                interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <0x0 0x1000>;
+                               status = "disabled";
                        };
                };
 
index 063113a..a6b4fca 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmc_3v3>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
 };
 
 &spi0 {
index d692e3b..7737863 100644 (file)
 
                backlight = <&lcd_bl>;
 
-               panel-timing {
-                       clock-frequency = <33000000>;
-                       hactive = <800>;
-                       vactive = <480>;
-                       hfront-porch = <210>;
-                       hback-porch = <16>;
-                       hsync-len = <30>;
-                       vback-porch = <10>;
-                       vfront-porch = <22>;
-                       vsync-len = <13>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&emmc_pins_default>;
        pinctrl-1 = <&emmc_pins_sleep>;
-       ti,non-removable;
+       non-removable;
 };
 
 &mmc3 {
        pinctrl-1 = <&mmc3_pins_sleep>;
        cap-power-off-card;
        keep-power-in-suspend;
-       ti,non-removable;
+       non-removable;
 
        #address-cells = <1>;
        #size-cells = <0>;
index 0d0f9fe..906ac29 100644 (file)
                        ranges = <0x0 0x60000 0x1000>;
 
                        mmc1: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                reg = <0x0 0x1000>;
-                               ti,dual-volt;
                                ti,needs-special-reset;
                                dmas = <&edma 24 0>,
                                        <&edma 25 0>;
                        reg = <0xcc020 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
-                       clock-names = "fck";
+                       clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>,
+                       <&dcan0_fck>;
+                       clock-names = "fck", "osc";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0xcc000 0x2000>;
                        dcan0: can@0 {
                                compatible = "ti,am4372-d_can", "ti,am3352-d_can";
                                reg = <0x0 0x2000>;
+                               clocks = <&dcan0_fck>;
+                               clock-names = "fck";
                                syscon-raminit = <&scm_conf 0x644 0>;
                                interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        reg = <0xd0020 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
-                       clock-names = "fck";
+                       clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>,
+                       <&dcan1_fck>;
+                       clock-names = "fck", "osc";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0xd0000 0x2000>;
                        dcan1: can@0 {
                                compatible = "ti,am4372-d_can", "ti,am3352-d_can";
                                reg = <0x0 0x2000>;
+                               clocks = <&dcan1_fck>;
+                               clock-names = "fck";
                                syscon-raminit = <&scm_conf 0x644 1>;
                                interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        ranges = <0x0 0xd8000 0x1000>;
 
                        mmc2: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                reg = <0x0 0x1000>;
                                ti,needs-special-reset;
                                dmas = <&edma 2 0>,
index 4d5a7ca..08eabf0 100644 (file)
 
                enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 
-               panel-timing {
-                       clock-frequency = <9000000>;
-                       hactive = <480>;
-                       vactive = <272>;
-                       hfront-porch = <2>;
-                       hback-porch = <2>;
-                       hsync-len = <41>;
-                       vfront-porch = <2>;
-                       vback-porch = <2>;
-                       vsync-len = <10>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
        pinctrl-1 = <&mmc3_pins_sleep>;
        cap-power-off-card;
        keep-power-in-suspend;
-       ti,non-removable;
+       non-removable;
 
        #address-cells = <1>;
        #size-cells = <0>;
index 27259fd..7d4e0df 100644 (file)
 
                backlight = <&lcd_bl>;
 
-               panel-timing {
-                       clock-frequency = <33000000>;
-                       hactive = <800>;
-                       vactive = <480>;
-                       hfront-porch = <210>;
-                       hback-porch = <16>;
-                       hsync-len = <30>;
-                       vback-porch = <10>;
-                       vfront-porch = <22>;
-                       vsync-len = <13>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
index 9877d77..4c51c6b 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&phy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
 };
 
 &ocp {
index da6d70f..3175266 100644 (file)
                        status = "disabled";
                };
 
-               dma@20000 {
+               dma: dma@20000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x20000 0x1000>;
                        interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
                        clocks = <&iprocslow>;
                        clock-names = "apb_pclk";
                        #dma-cells = <1>;
+                       dma-coherent;
+                       status = "disabled";
                };
 
                sdio: sdhci@21000 {
                        status = "disabled";
                };
 
-               mailbox: mailbox@25000 {
+               mailbox: mailbox@25c00 {
                        compatible = "brcm,iproc-fa2-mbox";
-                       reg = <0x25000 0x445>;
-                       interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <0x25c00 0x400>;
+                       interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
index 3343253..29bbecd 100644 (file)
@@ -17,6 +17,7 @@
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x18000000>;
        };
index 8c388eb..7be4c4e 100644 (file)
 
 /* USB 3 support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index c339771..e58ed7e 100644 (file)
 
 /* USB 3 support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 1c72ec8..716da62 100644 (file)
 
 /* XHCI support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 96a021c..a49c2fd 100644 (file)
 
 /* USB 3 and SLIC support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index b2c7f21..dd6dff6 100644 (file)
 
 /* USB 3 and SLIC support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 536fb24..a71371b 100644 (file)
        status = "okay";
 };
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 3fcca12..7b84b54 100644 (file)
        };
 };
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index f89a64c..2cf6a52 100644 (file)
        rx-num-evt = <32>;
 };
 
-&mailbox5 {
-       status = "okay";
-       mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
-               status = "okay";
-       };
-       mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
-               status = "okay";
-       };
-};
-
-&mailbox6 {
-       status = "okay";
-       mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
-               status = "okay";
-       };
-       mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
-               status = "okay";
-       };
-};
-
 &pcie1_rc {
        status = "okay";
 };
index 62ca895..0c6f266 100644 (file)
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): l4per_pwrdm, l4per_clkdm */
-                       clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>,
-                                <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x36000 0x1000>;
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): ipu_pwrdm, ipu_clkdm */
-                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>, <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x20000 0x1000>;
                        timer5: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): ipu_pwrdm, ipu_clkdm */
-                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>,
-                                <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x22000 0x1000>;
                        timer6: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
                        timer14: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
                        timer15: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
                        timer16: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
index c38e86e..8c33510 100644 (file)
                simple-audio-card,frame-master = <&sound_codec>;
 
                sound_cpu: simple-audio-card,cpu {
-                       sound-dai = <&ssi2>;
+                       sound-dai = <&ssi1>;
                };
 
                sound_codec: simple-audio-card,codec {
index f05e918..53a25fb 100644 (file)
        status = "okay";
 };
 
-&wdog1 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_wdog>;
-       fsl,ext-reset-output;
-       status = "okay";
-};
-
 &iomuxc {
        pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>;
 
                        MX6UL_PAD_NAND_DATA03__USDHC2_DATA3     0x170f9
                >;
        };
-
-       pinctrl_wdog: wdoggrp {
-               fsl,pins = <
-                       MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY    0x30b0
-               >;
-       };
 };
index a17af4d..61ba21a 100644 (file)
        status = "okay";
 };
 
+&wdog1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_wdog>;
+       fsl,ext-reset-output;
+       status = "okay";
+};
+
 &iomuxc {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_reset_out>;
                        MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09      0x1b0b0
                >;
        };
+
+       pinctrl_wdog: wdoggrp {
+               fsl,pins = <
+                       MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY    0x18b0
+               >;
+       };
 };
index ae89dea..91129dc 100644 (file)
@@ -11,7 +11,7 @@
        #size-cells = <1>;
        interrupt-parent = <&gic>;
 
-       L2: l2-cache-controller@c4200000 {
+       L2: cache-controller@c4200000 {
                compatible = "arm,pl310-cache";
                reg = <0xc4200000 0x1000>;
                cache-unified;
index e39eee6..08a7d3c 100644 (file)
                #interrupt-cells = <2>;
                #address-cells = <1>;
                #size-cells = <0>;
-               spi-max-frequency = <3000000>;
+               spi-max-frequency = <9600000>;
                spi-cs-high;
+               spi-cpol;
+               spi-cpha;
 
                cpcap_adc: adc {
                        compatible = "motorola,mapphone-cpcap-adc";
index 4089d97..3dbcae3 100644 (file)
                        linux,code = <SW_FRONT_PROXIMITY>;
                        linux,can-disable;
                };
+
+               machine_cover {
+                       label = "Machine Cover";
+                       gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
+                       linux,input-type = <EV_SW>;
+                       linux,code = <SW_MACHINE_COVER>;
+                       linux,can-disable;
+               };
        };
 
        isp1707: isp1707 {
        pinctrl-0 = <&mmc1_pins>;
        vmmc-supply = <&vmmc1>;
        bus-width = <4>;
-       /* For debugging, it is often good idea to remove this GPIO.
-          It means you can remove back cover (to reboot by removing
-          battery) and still use the MMC card. */
-       cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
 };
 
 /* most boards use vaux3, only some old versions use vmmc2 instead */
index 8047e8c..4548d87 100644 (file)
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio2>;
-               interrupts = <12 IRQ_TYPE_EDGE_FALLING>;        /* gpio_44 */
+               interrupts = <12 IRQ_TYPE_LEVEL_LOW>;           /* gpio_44 */
 
                phy-mode = "mii";
 
index 6c2b07f..4400f5f 100644 (file)
        ti,no-idle;
        timer@0 {
                assigned-clocks = <&l4_wkup_clkctrl OMAP4_TIMER1_CLKCTRL 24>;
-               assigned-clock-parents = <&sys_clkin_ck>;
+               assigned-clock-parents = <&sys_32k_ck>;
        };
 };
index c2b54af..78f3267 100644 (file)
                        };
                };
 
-               L2: l2-cache@fffef000 {
+               L2: cache-controller@fffef000 {
                        compatible = "arm,pl310-cache";
                        reg = <0xfffef000 0x1000>;
                        interrupts = <0 38 0x04>;
index 3b8571b..8f614c4 100644 (file)
                        reg = <0xffcfb100 0x80>;
                };
 
-               L2: l2-cache@fffff000 {
+               L2: cache-controller@fffff000 {
                        compatible = "arm,pl310-cache";
                        reg = <0xfffff000 0x1000>;
                        interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
index e6308fb..a88ee52 100644 (file)
                };
        };
 
-       mcc {
-               compatible = "arm,vexpress,config-bus";
-               arm,vexpress,config-bridge = <&v2m_sysreg>;
-
-               oscclk0 {
-                       /* MCC static memory clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 0>;
-                       freq-range = <25000000 60000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk0";
-               };
-
-               v2m_oscclk1: oscclk1 {
-                       /* CLCD clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 1>;
-                       freq-range = <23750000 65000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk1";
-               };
-
-               v2m_oscclk2: oscclk2 {
-                       /* IO FPGA peripheral clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 2>;
-                       freq-range = <24000000 24000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk2";
-               };
-
-               volt-vio {
-                       /* Logic level voltage */
-                       compatible = "arm,vexpress-volt";
-                       arm,vexpress-sysreg,func = <2 0>;
-                       regulator-name = "VIO";
-                       regulator-always-on;
-                       label = "VIO";
-               };
-
-               temp-mcc {
-                       /* MCC internal operating temperature */
-                       compatible = "arm,vexpress-temp";
-                       arm,vexpress-sysreg,func = <4 0>;
-                       label = "MCC";
-               };
-
-               reset {
-                       compatible = "arm,vexpress-reset";
-                       arm,vexpress-sysreg,func = <5 0>;
-               };
-
-               muxfpga {
-                       compatible = "arm,vexpress-muxfpga";
-                       arm,vexpress-sysreg,func = <7 0>;
-               };
-
-               shutdown {
-                       compatible = "arm,vexpress-shutdown";
-                       arm,vexpress-sysreg,func = <8 0>;
-               };
-
-               reboot {
-                       compatible = "arm,vexpress-reboot";
-                       arm,vexpress-sysreg,func = <9 0>;
-               };
-
-               dvimode {
-                       compatible = "arm,vexpress-dvimode";
-                       arm,vexpress-sysreg,func = <11 0>;
-               };
-       };
-
        bus@8000000 {
                motherboard-bus {
                        model = "V2M-P1";
                                                };
                                        };
                                };
+
+                               mcc {
+                                       compatible = "arm,vexpress,config-bus";
+                                       arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+                                       oscclk0 {
+                                               /* MCC static memory clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 0>;
+                                               freq-range = <25000000 60000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk0";
+                                       };
+
+                                       v2m_oscclk1: oscclk1 {
+                                               /* CLCD clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 1>;
+                                               freq-range = <23750000 65000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk1";
+                                       };
+
+                                       v2m_oscclk2: oscclk2 {
+                                               /* IO FPGA peripheral clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 2>;
+                                               freq-range = <24000000 24000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk2";
+                                       };
+
+                                       volt-vio {
+                                               /* Logic level voltage */
+                                               compatible = "arm,vexpress-volt";
+                                               arm,vexpress-sysreg,func = <2 0>;
+                                               regulator-name = "VIO";
+                                               regulator-always-on;
+                                               label = "VIO";
+                                       };
+
+                                       temp-mcc {
+                                               /* MCC internal operating temperature */
+                                               compatible = "arm,vexpress-temp";
+                                               arm,vexpress-sysreg,func = <4 0>;
+                                               label = "MCC";
+                                       };
+
+                                       reset {
+                                               compatible = "arm,vexpress-reset";
+                                               arm,vexpress-sysreg,func = <5 0>;
+                                       };
+
+                                       muxfpga {
+                                               compatible = "arm,vexpress-muxfpga";
+                                               arm,vexpress-sysreg,func = <7 0>;
+                                       };
+
+                                       shutdown {
+                                               compatible = "arm,vexpress-shutdown";
+                                               arm,vexpress-sysreg,func = <8 0>;
+                                       };
+
+                                       reboot {
+                                               compatible = "arm,vexpress-reboot";
+                                               arm,vexpress-sysreg,func = <9 0>;
+                                       };
+
+                                       dvimode {
+                                               compatible = "arm,vexpress-dvimode";
+                                               arm,vexpress-sysreg,func = <11 0>;
+                                       };
+                               };
                        };
                };
        };
index 84dc0ba..5dcf3c6 100644 (file)
@@ -87,4 +87,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
        return dram_base + SZ_512M;
 }
 
+struct efi_arm_entry_state {
+       u32     cpsr_before_ebs;
+       u32     sctlr_before_ebs;
+       u32     cpsr_after_ebs;
+       u32     sctlr_after_ebs;
+};
+
 #endif /* _ASM_ARM_EFI_H */
index c036a4a..a1570c8 100644 (file)
 #if defined(__APCS_26__)
 #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
 #endif
-/*
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- *           miscompiles find_get_entry(), and can result in EXT3 and EXT4
- *           filesystem corruption (possibly other FS too).
- */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
 
 int main(void)
 {
index 6aa938b..1df0ee0 100644 (file)
@@ -53,6 +53,7 @@ config ARCH_BCM_NSP
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
        select ARM_ERRATA_764369 if SMP
+       select ARM_TIMER_SP804
        select THERMAL
        select THERMAL_OF
        help
index 2a685ad..ae84c08 100644 (file)
@@ -289,6 +289,6 @@ struct platform_device *__init imx_add_spi_imx(
                const struct spi_imx_master *pdata);
 
 struct platform_device *imx_add_imx_dma(char *name, resource_size_t iobase,
-                                       int irq, int irq_err);
+                                       int irq);
 struct platform_device *imx_add_imx_sdma(char *name,
        resource_size_t iobase, int irq, struct sdma_platform_data *pdata);
index 78628ef..355de84 100644 (file)
@@ -24,7 +24,8 @@ struct platform_device *__init mxc_register_gpio(char *name, int id,
                        .flags = IORESOURCE_IRQ,
                },
        };
+       unsigned int nres;
 
-       return platform_device_register_resndata(&mxc_aips_bus,
-                       name, id, res, ARRAY_SIZE(res), NULL, 0);
+       nres = irq_high ? ARRAY_SIZE(res) : ARRAY_SIZE(res) - 1;
+       return platform_device_register_resndata(&mxc_aips_bus, name, id, res, nres, NULL, 0);
 }
index 26b47b3..12656f2 100644 (file)
@@ -6,7 +6,7 @@
 #include "devices-common.h"
 
 struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name,
-       resource_size_t iobase, int irq, int irq_err)
+       resource_size_t iobase, int irq)
 {
        struct resource res[] = {
                {
@@ -17,10 +17,6 @@ struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name,
                        .start = irq,
                        .end = irq,
                        .flags = IORESOURCE_IRQ,
-               }, {
-                       .start = irq_err,
-                       .end = irq_err,
-                       .flags = IORESOURCE_IRQ,
                },
        };
 
index 50a2eda..b834026 100644 (file)
@@ -78,8 +78,7 @@ void __init imx21_soc_init(void)
        mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
 
        pinctrl_provide_dummies();
-       imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR,
-                       MX21_INT_DMACH0, 0); /* No ERR irq */
+       imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, MX21_INT_DMACH0);
        platform_device_register_simple("imx21-audmux", 0, imx21_audmux_res,
                                        ARRAY_SIZE(imx21_audmux_res));
 }
index 4e41251..2717614 100644 (file)
@@ -79,8 +79,7 @@ void __init imx27_soc_init(void)
        mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
 
        pinctrl_provide_dummies();
-       imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR,
-                       MX27_INT_DMACH0, 0); /* No ERR irq */
+       imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, MX27_INT_DMACH0);
        /* imx27 has the imx21 type audmux */
        platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res,
                                        ARRAY_SIZE(imx27_audmux_res));
index f057df8..e9962b4 100644 (file)
@@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_base = gen_pool_alloc(ocram_pool, size);
        if (!ocram_base) {
                pr_warn("%s: unable to alloc ocram!\n", __func__);
                ret = -ENOMEM;
-               goto put_node;
+               goto put_device;
        }
 
        phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
        if (virt_out)
                *virt_out = virt;
 
+put_device:
+       put_device(&pdev->dev);
 put_node:
        of_node_put(node);
 
index dd34dff..40c74b4 100644 (file)
@@ -493,14 +493,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
        if (!ocram_base) {
                pr_warn("%s: unable to alloc ocram!\n", __func__);
                ret = -ENOMEM;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -523,7 +523,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
        if (ret) {
                pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
-               goto put_node;
+               goto put_device;
        }
 
        ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
@@ -570,7 +570,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
                &imx6_suspend,
                MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
 
-       goto put_node;
+       goto put_device;
 
 pl310_cache_map_failed:
        iounmap(pm_info->gpc_base.vbase);
@@ -580,6 +580,8 @@ iomuxc_map_failed:
        iounmap(pm_info->src_base.vbase);
 src_map_failed:
        iounmap(pm_info->mmdc_base.vbase);
+put_device:
+       put_device(&pdev->dev);
 put_node:
        of_node_put(node);
 
index 82706af..15b29a1 100644 (file)
@@ -3435,7 +3435,7 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
                regs = ioremap(data->module_pa,
                               data->module_size);
                if (!regs)
-                       return -ENOMEM;
+                       goto out_free_sysc;
        }
 
        /*
@@ -3445,13 +3445,13 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
        if (oh->class->name && strcmp(oh->class->name, data->name)) {
                class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
                if (!class)
-                       return -ENOMEM;
+                       goto out_unmap;
        }
 
        if (list_empty(&oh->slave_ports)) {
                oi = kcalloc(1, sizeof(*oi), GFP_KERNEL);
                if (!oi)
-                       return -ENOMEM;
+                       goto out_free_class;
 
                /*
                 * Note that we assume interconnect interface clocks will be
@@ -3478,6 +3478,14 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
        spin_unlock_irqrestore(&oh->_lock, flags);
 
        return 0;
+
+out_free_class:
+       kfree(class);
+out_unmap:
+       iounmap(regs);
+out_free_sysc:
+       kfree(sysc);
+       return -ENOMEM;
 }
 
 static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
@@ -3489,7 +3497,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
 };
 
 static const struct omap_hwmod_reset omap_reset_quirks[] = {
-       { .match = "dss", .len = 3, .reset = omap_dss_reset, },
+       { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
        { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
        { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
        { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
index dcb9893..ffecbf2 100644 (file)
@@ -20,14 +20,6 @@ static const char *const stih41x_dt_match[] __initconst = {
        NULL
 };
 
-static void sti_l2_write_sec(unsigned long val, unsigned reg)
-{
-       /*
-        * We can't write to secure registers as we are in non-secure
-        * mode, until we have some SMI service available.
-        */
-}
-
 DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
        .dt_compat      = stih41x_dt_match,
        .l2c_aux_val    = L2C_AUX_CTRL_SHARED_OVERRIDE |
@@ -36,5 +28,4 @@ DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
                          L2C_AUX_CTRL_WAY_SIZE(4),
        .l2c_aux_mask   = 0xc0000fff,
        .smp            = smp_ops(sti_smp_ops),
-       .l2c_write_sec  = sti_l2_write_sec,
 MACHINE_END
index fd4e1ce..e93145d 100644 (file)
@@ -241,7 +241,6 @@ static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
  */
-#define GRANT_TABLE_PHYSADDR 0
 void __init xen_early_init(void)
 {
        of_scan_flat_dt(fdt_find_hyper_node, NULL);
index a4a094b..66dc41f 100644 (file)
@@ -1518,9 +1518,9 @@ config ARM64_PTR_AUTH
        default y
        depends on !KVM || ARM64_VHE
        depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-       # GCC 9.1 and later inserts a .note.gnu.property section note for PAC
+       # Modern compilers insert a .note.gnu.property section note for PAC
        # which is only understood by binutils starting with version 2.33.1.
-       depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
+       depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
        depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
        depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
index d1fc9c2..9498d1d 100644 (file)
@@ -77,7 +77,7 @@
                method = "smc";
        };
 
-       intc: intc@fffc1000 {
+       intc: interrupt-controller@fffc1000 {
                compatible = "arm,gic-400", "arm,cortex-a15-gic";
                #interrupt-cells = <3>;
                interrupt-controller;
                        status = "disabled";
                };
 
-               nand: nand@ffb90000 {
+               nand: nand-controller@ffb90000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "altr,socfpga-denali-nand";
                        clock-names = "timer";
                };
 
-               uart0: serial0@ffc02000 {
+               uart0: serial@ffc02000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0xffc02000 0x100>;
                        interrupts = <0 108 4>;
                        status = "disabled";
                };
 
-               uart1: serial1@ffc02100 {
+               uart1: serial@ffc02100 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0xffc02100 0x100>;
                        interrupts = <0 109 4>;
index f6c4a15..feadd21 100644 (file)
 };
 
 &qspi {
+       status = "okay";
        flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
index 9946515..c079667 100644 (file)
 };
 
 &qspi {
+       status = "okay";
        flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
 
                        qspi_boot: partition@0 {
                                label = "Boot and fpga data";
-                               reg = <0x0 0x034B0000>;
+                               reg = <0x0 0x03FE0000>;
                        };
 
-                       qspi_rootfs: partition@4000000 {
+                       qspi_rootfs: partition@3FE0000 {
                                label = "Root Filesystem - JFFS2";
-                               reg = <0x034B0000 0x0EB50000>;
+                               reg = <0x03FE0000 0x0C020000>;
                        };
                };
        };
index 6a226fa..9e43f4d 100644 (file)
@@ -10,7 +10,7 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/sound/meson-aiu.h>
 
-#include "meson-gxl-s905x.dtsi"
+#include "meson-gxl-s805x.dtsi"
 
 / {
        compatible = "libretech,aml-s805x-ac", "amlogic,s805x",
index 867e30f..eb7f5a3 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/input/input.h>
 
-#include "meson-gxl-s905x.dtsi"
+#include "meson-gxl-s805x.dtsi"
 
 / {
        compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi
new file mode 100644 (file)
index 0000000..f9d7056
--- /dev/null
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 BayLibre SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include "meson-gxl-s905x.dtsi"
+
+/ {
+       compatible = "amlogic,s805x", "amlogic,meson-gxl";
+};
+
+/* The S805X Package doesn't seem to handle the 744MHz OPP correctly */
+&mali {
+       assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+                         <&clkc CLKID_MALI_0>,
+                         <&clkc CLKID_MALI>; /* Glitch free mux */
+       assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+                                <0>, /* Do Nothing */
+                                <&clkc CLKID_MALI_0>;
+       assigned-clock-rates = <0>, /* Do Nothing */
+                              <666666666>,
+                              <0>; /* Do Nothing */
+};
index fc59c85..6c8b189 100644 (file)
        };
 };
 
+&hwrng {
+       clocks = <&clkc CLKID_RNG0>;
+       clock-names = "core";
+};
+
 &i2c_A {
        clocks = <&clkc CLKID_I2C>;
 };
index fb0137a..94911b1 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index e5ec832..0f1d7f8 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index d07e0e6..a1e5483 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index 51d9483..92f478d 100644 (file)
@@ -98,6 +98,7 @@
 };
 
 &qspi {
+       status = "okay";
        flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
index 883e8ba..2ca7ba6 100644 (file)
@@ -194,7 +194,7 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
-CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_RCAR_HOST=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCIE_ALTERA=y
index 5e5dc05..12f0eb5 100644 (file)
@@ -73,11 +73,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        ".pushsection .altinstructions,\"a\"\n"                         \
        ALTINSTR_ENTRY(feature)                                         \
        ".popsection\n"                                                 \
-       ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ".subsection 1\n"                                               \
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
        "664:\n\t"                                                      \
-       ".popsection\n\t"                                               \
+       ".previous\n\t"                                                 \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
        ".org   . - (662b-661b) + (664b-663b)\n"                        \
        ".endif\n"
@@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 662:   .pushsection .altinstructions, "a"
        altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
 663:   \insn2
-664:   .popsection
+664:   .previous
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
        .endif
@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        .pushsection .altinstructions, "a"
        altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .align 2        /* So GAS knows label 661 is suitably aligned */
 661:
 .endm
@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_else
 662:
        .if .Lasm_alt_mode==0
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .else
-       .popsection
+       .previous
        .endif
 663:
 .endm
@@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_endif
 664:
        .if .Lasm_alt_mode==0
-       .popsection
+       .previous
        .endif
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
index a358e97..6647ae4 100644 (file)
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
        return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
        write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
index 7ae54d7..9f0ec21 100644 (file)
@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
        u64 (*read_cntvct_el0)(void);
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+       bool disable_compat_vdso;
 };
 
 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
index 5d1f4ae..f7c3d1f 100644 (file)
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
index a87a93f..7219cdd 100644 (file)
@@ -86,6 +86,7 @@
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
 #define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD    0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
 
 #define NVIDIA_CPU_PART_DENVER         0x003
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
index e5ceea2..0b298f4 100644 (file)
@@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el);
 
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+                                struct task_struct *task);
 
 void kernel_enable_single_step(struct pt_regs *regs);
 void kernel_disable_single_step(void);
index 81fefd2..ba89a9a 100644 (file)
@@ -12,7 +12,6 @@
  * instead.
  */
 #define BTI_C hint 34 ;
-#define BTI_J hint 36 ;
 
 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant assembly
        SYM_START(name, SYM_L_WEAK, SYM_A_NONE)         \
        BTI_C
 
-#define SYM_INNER_LABEL(name, linkage)                 \
-       .type name SYM_T_NONE ASM_NL                    \
-       SYM_ENTRY(name, linkage, SYM_A_NONE)            \
-       BTI_J
-
 #endif
 
 /*
index 68140fd..8444df0 100644 (file)
@@ -19,6 +19,9 @@
 
 typedef struct {
        atomic64_t      id;
+#ifdef CONFIG_COMPAT
+       void            *sigpage;
+#endif
        void            *vdso;
        unsigned long   flags;
 } mm_context_t;
index 2e7e0f4..4d867c6 100644 (file)
@@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_HYP               __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC          __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO            __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE                __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE                __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
 
 #define PAGE_S2_MEMATTR(attr)                                          \
        ({                                                              \
index 65299a2..cfc0672 100644 (file)
@@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task,
                                     struct pt_regs *regs)
 {
        unsigned long error = regs->regs[0];
+
+       if (is_compat_thread(task_thread_info(task)))
+               error = sign_extend64(error, 31);
+
        return IS_ERR_VALUE(error) ? error : 0;
 }
 
@@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task,
                                            struct pt_regs *regs,
                                            int error, long val)
 {
-       regs->regs[0] = (long) error ? error : val;
+       if (error)
+               val = error;
+
+       if (is_compat_thread(task_thread_info(task)))
+               val = lower_32_bits(val);
+
+       regs->regs[0] = val;
 }
 
 #define SYSCALL_MAX_ARGS 6
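The two hunks above make syscall error codes and return values well-defined for compat (AArch32) tasks: the error value is sign-extended from bit 31 before the IS_ERR_VALUE() check, and anything written back into x0 for a compat task is truncated to its low 32 bits. A minimal stand-alone C sketch of that arithmetic follows; the helpers mirror the kernel's sign_extend64() and lower_32_bits(), and the example values are illustrative only, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* User-space stand-ins for the kernel helpers used in the hunks above. */
static int64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;

        return (int64_t)(value << shift) >> shift;
}

static uint32_t lower_32_bits(uint64_t value)
{
        return (uint32_t)value;
}

int main(void)
{
        /* A compat task returning -EPERM leaves 0xffffffff in the low half of x0. */
        uint64_t x0 = 0xffffffffULL;

        /* Read as a raw 64-bit value this looks like a large positive number... */
        printf("raw x0:        %#llx\n", (unsigned long long)x0);
        /* ...but sign-extending from bit 31 recovers the negative errno. */
        printf("sign-extended: %lld\n", (long long)sign_extend64(x0, 31));
        /* Values handed back to a compat task keep only their low 32 bits. */
        printf("lower 32 bits: %#x\n", lower_32_bits(0x123456789ULL));

        return 0;
}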
index 6ea8b6a..5e784e1 100644 (file)
@@ -93,6 +93,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_FSCHECK           (1 << TIF_FSCHECK)
+#define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_32BIT             (1 << TIF_32BIT)
 #define _TIF_SVE               (1 << TIF_SVE)
 
index df6ea65..b054d9f 100644 (file)
@@ -2,7 +2,10 @@
 #ifndef __ASM_VDSOCLOCKSOURCE_H
 #define __ASM_VDSOCLOCKSOURCE_H
 
-#define VDSO_ARCH_CLOCKMODES   \
-       VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES                                   \
+       /* vdso clocksource for both 32 and 64bit tasks */      \
+       VDSO_CLOCKMODE_ARCHTIMER,                               \
+       /* vdso clocksource for 64bit tasks only */             \
+       VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
 
 #endif
index b6907ae..9a625e8 100644 (file)
@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
         * update. Return something. Core will do another round and then
         * see the mode change and fallback to the syscall.
         */
-       if (clock_mode == VDSO_CLOCKMODE_NONE)
+       if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
                return 0;
 
        /*
@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
        return ret;
 }
 
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+       return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok    vdso_clocksource_ok
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
index 151f285..a561cbb 100644 (file)
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
 
 obj-$(CONFIG_COMPAT)                   += sys32.o signal32.o                   \
                                           sys_compat.o
-ifneq ($(CONFIG_COMPAT_VDSO), y)
 obj-$(CONFIG_COMPAT)                   += sigreturn32.o
-endif
 obj-$(CONFIG_KUSER_HELPERS)            += kuser32.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o entry-ftrace.o
 obj-$(CONFIG_MODULES)                  += module.o
index d1757ef..7303994 100644 (file)
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
  */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-       unsigned long replptr;
-
-       if (kernel_text_address(pc))
-               return true;
-
-       replptr = (unsigned long)ALT_REPL_PTR(alt);
-       if (pc >= replptr && pc <= (replptr + alt->alt_len))
-               return false;
-
-       /*
-        * Branching into *another* alternate sequence is doomed, and
-        * we're not even trying to fix it up.
-        */
-       BUG();
+       unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+       return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }
 
 #define align_down(x, a)       ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
index ad06d68..79728bf 100644 (file)
@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        {},
 };
 
@@ -470,12 +472,7 @@ static bool
 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
 {
-       u32 midr = read_cpuid_id();
-       /* Cortex-A76 r0p0 - r3p1 */
-       struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+       return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
 }
 #endif
 
@@ -726,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
 };
 #endif
@@ -770,11 +769,23 @@ static const struct midr_range erratum_speculative_at_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+       /* Kryo4xx Silver (rdpe => r1p0) */
+       MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
        {},
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+       /* Cortex-A76 r0p0 - r3p1 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -914,6 +925,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
+               .midr_range_list = erratum_1463225,
        },
 #endif
 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
index 4ae4167..9fae0ef 100644 (file)
@@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
                MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
                { /* sentinel */ }
        };
        char const *str = "kpti command line option";
@@ -1406,6 +1408,8 @@ static bool cpu_has_broken_dbm(void)
        static const struct midr_range cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1024718
                MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 -r1p0
+               /* Kryo4xx Silver (rdpe => r1p0) */
+               MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
                {},
        };
index 5df4936..7310a4f 100644 (file)
@@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init);
 /*
  * Single step API and exception handling.
  */
-static void set_regs_spsr_ss(struct pt_regs *regs)
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
        regs->pstate |= DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(set_regs_spsr_ss);
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
 
-static void clear_regs_spsr_ss(struct pt_regs *regs)
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
        regs->pstate &= ~DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(clear_regs_spsr_ss);
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+
+#define set_regs_spsr_ss(r)    set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r)  clear_user_regs_spsr_ss(&(r)->user_regs)
 
 static DEFINE_SPINLOCK(debug_hook_lock);
 static LIST_HEAD(user_step_hook);
@@ -391,17 +394,26 @@ void user_rewind_single_step(struct task_struct *task)
         * If single step is active for this thread, then set SPSR.SS
         * to 1 to avoid returning to the active-pending state.
         */
-       if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+       if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
                set_regs_spsr_ss(task_pt_regs(task));
 }
 NOKPROBE_SYMBOL(user_rewind_single_step);
 
 void user_fastforward_single_step(struct task_struct *task)
 {
-       if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+       if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
                clear_regs_spsr_ss(task_pt_regs(task));
 }
 
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+                                struct task_struct *task)
+{
+       if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+               set_user_regs_spsr_ss(regs);
+       else
+               clear_user_regs_spsr_ss(regs);
+}
+
 /* Kernel API */
 void kernel_enable_single_step(struct pt_regs *regs)
 {
index 3dbdf97..d3be9db 100644 (file)
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
        /*
         * The CPU masked interrupts, and we are leaving them masked during
         * do_debug_exception(). Update PMR as if we had called
-        * local_mask_daif().
+        * local_daif_mask().
         */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
index 5304d19..35de8ba 100644 (file)
@@ -126,8 +126,10 @@ alternative_else_nop_endif
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm
 
-       // This macro corrupts x0-x3. It is the caller's duty
-       // to save/restore them if required.
+       /*
+        * This macro corrupts x0-x3. It is the caller's duty to save/restore
+        * them if required.
+        */
        .macro  apply_ssbd, state, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
@@ -167,13 +169,28 @@ alternative_cb_end
        stp     x28, x29, [sp, #16 * 14]
 
        .if     \el == 0
+       .if     \regsize == 32
+       /*
+        * If we're returning from a 32-bit task on a system affected by
+        * 1418040 then re-enable userspace access to the virtual counter.
+        */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+       .endif
        clear_gp_regs
        mrs     x21, sp_el0
        ldr_this_cpu    tsk, __entry_task, x20
        msr     sp_el0, tsk
 
-       // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
-       // when scheduling.
+       /*
+        * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+        * when scheduling.
+        */
        ldr     x19, [tsk, #TSK_TI_FLAGS]
        disable_step_tsk x19, x20
 
@@ -320,6 +337,14 @@ alternative_else_nop_endif
        tst     x22, #PSR_MODE32_BIT            // native task?
        b.eq    3f
 
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
-       b       4f
-alternative_else_nop_endif
-       /*
-        * if (x22.mode32 == cntkctl_el1.el0vcten)
-        *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
-        */
-       mrs     x1, cntkctl_el1
-       eon     x0, x1, x22, lsr #3
-       tbz     x0, #1, 4f
-       eor     x1, x1, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
-       msr     cntkctl_el1, x1
-4:
-#endif
        scs_save tsk, x0
 
        /* No kernel C function calls after this as user keys are set. */
@@ -377,11 +387,11 @@ alternative_else_nop_endif
        .if     \el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-       bne     5f
+       bne     4f
        msr     far_el1, x30
        tramp_alias     x30, tramp_exit_native
        br      x30
-5:
+4:
        tramp_alias     x30, tramp_exit_compat
        br      x30
 #endif
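The entry.S rework above splits the erratum 1418040 handling across the entry and exit paths: kernel_exit now clears the EL0 virtual-counter enable bit in CNTKCTL_EL1 before dropping into a 32-bit task (so its CNTVCT reads trap to EL1), and kernel_entry from a 32-bit task sets the bit again, replacing the old toggle sequence on the exit path. A rough C rendering of that decision, assuming only what the hunks themselves show (bit 1 is ARCH_TIMER_USR_VCT_ACCESS_EN); this is a sketch of the intent, not the implementation, which remains in assembly:

#include <stdbool.h>
#include <stdint.h>

/*
 * Bit 1 of CNTKCTL_EL1, called ARCH_TIMER_USR_VCT_ACCESS_EN in the hunks
 * above: when it is clear, EL0 accesses to the virtual counter trap to EL1.
 */
#define ARCH_TIMER_USR_VCT_ACCESS_EN    (UINT64_C(1) << 1)

uint64_t erratum_1418040_cntkctl(uint64_t cntkctl, bool compat_task_at_el0)
{
        /* kernel_exit to a 32-bit task: trap its virtual-counter reads. */
        if (compat_task_at_el0)
                return cntkctl & ~ARCH_TIMER_USR_VCT_ACCESS_EN;

        /* kernel_entry from a 32-bit task: restore normal EL0 access. */
        return cntkctl | ARCH_TIMER_USR_VCT_ACCESS_EN;
}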
index 4311992..1a157ca 100644 (file)
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
        if (!kgdb_single_step)
                return DBG_HOOK_ERROR;
 
-       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+       kgdb_handle_exception(0, SIGTRAP, 0, regs);
        return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
index 0bbac61..666b225 100644 (file)
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
                return 0;
 
        /*
-        * Compat (i.e. 32 bit) mode:
-        * - PC has been set in the pt_regs struct in kernel_entry,
-        * - Handle SP and LR here.
+        * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+        * we're stuck with it for ABI compatibility reasons.
+        *
+        * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+        * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+        * These correspond directly to a prefix of the registers saved in our
+        * 'struct pt_regs', with the exception of the PC, so we copy that down
+        * (x15 corresponds to SP_hyp in the architecture).
+        *
+        * So far, so good.
+        *
+        * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+        * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+        * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+        * PC registers would normally live. The initial idea was to allow a
+        * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+        * how well that works in practice, somebody might be relying on it.
+        *
+        * At the time we make a sample, we don't know whether the consumer is
+        * 32-bit or 64-bit, so we have to cater for both possibilities.
         */
        if (compat_user_mode(regs)) {
                if ((u32)idx == PERF_REG_ARM64_SP)
                        return regs->compat_sp;
                if ((u32)idx == PERF_REG_ARM64_LR)
                        return regs->compat_lr;
+               if (idx == 15)
+                       return regs->pc;
        }
 
        if ((u32)idx == PERF_REG_ARM64_SP)
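The block comment above describes the compat sampling ABI in prose; the short stand-alone sketch below restates the same mapping from a consumer's point of view. The index values used here (LR = 30, SP = 31, PC = 32) follow the arm64 perf_regs ABI but should be read as assumptions made for illustration, not as a definitive statement of that ABI.

#include <stdio.h>

/* Which value lands in each sampled register slot for a task in AArch32 mode. */
static const char *compat_sample_slot(unsigned int idx)
{
        if (idx < 15)
                return "r0-r14 (a direct prefix of the saved pt_regs)";
        if (idx == 15)
                return "PC (copied down, since x15 would be SP_hyp)";
        if (idx == 30)
                return "LR_usr (compat_lr)";
        if (idx == 31)
                return "SP_usr (compat_sp)";
        if (idx == 32)
                return "PC";
        return "raw pt_regs value (not meaningful for an AArch32 task)";
}

int main(void)
{
        unsigned int samples[] = { 0, 15, 16, 30, 31, 32 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("reg %2u -> %s\n", samples[i], compat_sample_slot(samples[i]));

        return 0;
}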
index d1c95dc..5290f17 100644 (file)
@@ -120,15 +120,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 void *alloc_insn_page(void)
 {
-       void *page;
-
-       page = vmalloc_exec(PAGE_SIZE);
-       if (page) {
-               set_memory_ro((unsigned long)page, 1);
-               set_vm_flush_reset_perms(page);
-       }
-
-       return page;
+       return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 /* arm kprobe: install breakpoint in text */
index 68b7f34..1e02e98 100644 (file)
@@ -1811,19 +1811,42 @@ static void tracehook_report_syscall(struct pt_regs *regs,
        unsigned long saved_reg;
 
        /*
-        * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
-        * used to denote syscall entry/exit:
+        * We have some ABI weirdness here in the way that we handle syscall
+        * exit stops because we indicate whether or not the stop has been
+        * signalled from syscall entry or syscall exit by clobbering a general
+        * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
+        * and restoring its old value after the stop. This means that:
+        *
+        * - Any writes by the tracer to this register during the stop are
+        *   ignored/discarded.
+        *
+        * - The actual value of the register is not available during the stop,
+        *   so the tracer cannot save it and restore it later.
+        *
+        * - Syscall stops behave differently to seccomp and pseudo-step traps
+        *   (the latter do not nobble any registers).
         */
        regno = (is_compat_task() ? 12 : 7);
        saved_reg = regs->regs[regno];
        regs->regs[regno] = dir;
 
-       if (dir == PTRACE_SYSCALL_EXIT)
+       if (dir == PTRACE_SYSCALL_ENTER) {
+               if (tracehook_report_syscall_entry(regs))
+                       forget_syscall(regs);
+               regs->regs[regno] = saved_reg;
+       } else if (!test_thread_flag(TIF_SINGLESTEP)) {
                tracehook_report_syscall_exit(regs, 0);
-       else if (tracehook_report_syscall_entry(regs))
-               forget_syscall(regs);
+               regs->regs[regno] = saved_reg;
+       } else {
+               regs->regs[regno] = saved_reg;
 
-       regs->regs[regno] = saved_reg;
+               /*
+                * Signal a pseudo-step exception since we are stepping but
+                * tracer modifications to the registers may have rewound the
+                * state machine.
+                */
+               tracehook_report_syscall_exit(regs, 1);
+       }
 }
 
 int syscall_trace_enter(struct pt_regs *regs)
@@ -1833,12 +1856,12 @@ int syscall_trace_enter(struct pt_regs *regs)
        if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
                if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
-                       return -1;
+                       return NO_SYSCALL;
        }
 
        /* Do the secure computing after ptrace; failures should be fast. */
        if (secure_computing() == -1)
-               return -1;
+               return NO_SYSCALL;
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);
@@ -1851,12 +1874,14 @@ int syscall_trace_enter(struct pt_regs *regs)
 
 void syscall_trace_exit(struct pt_regs *regs)
 {
+       unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
        audit_syscall_exit(regs);
 
-       if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+       if (flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs_return_value(regs));
 
-       if (test_thread_flag(TIF_SYSCALL_TRACE))
+       if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 
        rseq_syscall(regs);
@@ -1934,8 +1959,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
  */
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
 {
-       if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
-               regs->pstate &= ~DBG_SPSR_SS;
+       /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+       user_regs_reset_single_step(regs, task);
 
        if (is_compat_thread(task_thread_info(task)))
                return valid_compat_regs(regs);
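The long comment above spells out the scratch-register convention for syscall stops. As a brief orientation aid, the fragment below shows the only safe use a tracer can make of that register during the stop: reading the direction. The 0 = entry / 1 = exit encoding mirrors the kernel's internal enum and is stated here as an assumption for illustration; writes to the register at this point are discarded, as the comment explains.

#include <sys/user.h>

/*
 * For a 64-bit tracee stopped at a syscall stop, x7 holds the stop direction;
 * an AArch32 tracee exposes the same value in ip/r12 through its own regset.
 */
int stopped_at_syscall_exit(const struct user_regs_struct *regs)
{
        return (int)regs->regs[7];      /* 0: syscall entry, 1: syscall exit */
}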
index 801d56c..3b4f31f 100644 (file)
@@ -800,7 +800,6 @@ static void setup_restart_syscall(struct pt_regs *regs)
  */
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
-       struct task_struct *tsk = current;
        sigset_t *oldset = sigmask_to_save();
        int usig = ksig->sig;
        int ret;
@@ -824,14 +823,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
         */
        ret |= !valid_user_regs(&regs->user_regs, current);
 
-       /*
-        * Fast forward the stepping logic so we step into the signal
-        * handler.
-        */
-       if (!ret)
-               user_fastforward_single_step(tsk);
-
-       signal_setup_done(ret, ksig, 0);
+       /* Step into the signal handler if we are stepping */
+       signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
index 82feca6..2f507f5 100644 (file)
@@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
-#ifdef CONFIG_COMPAT_VDSO
-               void *vdso_base = current->mm->context.vdso;
-               void *vdso_trampoline;
-
-               if (ka->sa.sa_flags & SA_SIGINFO) {
-                       if (thumb) {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_rt_sigreturn_thumb);
-                       } else {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_rt_sigreturn_arm);
-                       }
-               } else {
-                       if (thumb) {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_sigreturn_thumb);
-                       } else {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_sigreturn_arm);
-                       }
-               }
-
-               retcode = ptr_to_compat(vdso_trampoline) + thumb;
-#else
                unsigned int idx = thumb << 1;
 
                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
 
-               retcode = (unsigned long)current->mm->context.vdso +
+               retcode = (unsigned long)current->mm->context.sigpage +
                          (idx << 2) + thumb;
-#endif
        }
 
        regs->regs[0]   = usig;
index 5f5b868..5f0c048 100644 (file)
@@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                ret = do_ni_syscall(regs, scno);
        }
 
+       if (is_compat_task())
+               ret = lower_32_bits(ret);
+
        regs->regs[0] = ret;
 }
 
@@ -121,7 +124,21 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        user_exit();
 
        if (has_syscall_work(flags)) {
-               /* set default errno for user-issued syscall(-1) */
+               /*
+                * The de-facto standard way to skip a system call using ptrace
+                * is to set the system call to -1 (NO_SYSCALL) and set x0 to a
+                * suitable error code for consumption by userspace. However,
+                * this cannot be distinguished from a user-issued syscall(-1)
+                * and so we must set x0 to -ENOSYS here in case the tracer doesn't
+                * issue the skip and we fall into trace_exit with x0 preserved.
+                *
+                * This is slightly odd because it also means that if a tracer
+                * sets the system call number to -1 but does not initialise x0,
+                * then x0 will be preserved for all system calls apart from a
+                * user-issued syscall(-1). However, requesting a skip and not
+                * setting the return value is unlikely to do anything sensible
+                * anyway.
+                */
                if (scno == NO_SYSCALL)
                        regs->regs[0] = -ENOSYS;
                scno = syscall_trace_enter(regs);
@@ -139,7 +156,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
                local_daif_mask();
                flags = current_thread_info()->flags;
-               if (!has_syscall_work(flags)) {
+               if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
                        /*
                         * We're off to userspace, where interrupts are
                         * always enabled after we restore the flags from
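The comment added above documents the de-facto syscall-skip convention from the kernel's side. For completeness, here is a compressed user-space sketch of the same sequence as a tracer would drive it; it is not part of this patch, error handling is omitted, and -EPERM is merely an example of "a suitable error code". Note that x0 set this way is preserved into the skipped syscall's exit, which is exactly what the sequence relies on.

#include <elf.h>
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

/* Called with the tracee stopped at a syscall-entry stop (PTRACE_SYSCALL). */
void skip_syscall(pid_t tracee)
{
        struct user_regs_struct regs;
        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
        int nr = -1;    /* NO_SYSCALL */
        struct iovec nr_iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

        ptrace(PTRACE_GETREGSET, tracee, (void *)NT_PRSTATUS, &iov);

        /* 1. Set the syscall number to -1 so nothing is actually invoked. */
        ptrace(PTRACE_SETREGSET, tracee, (void *)NT_ARM_SYSTEM_CALL, &nr_iov);

        /* 2. Give x0 the error code user space should observe when it resumes. */
        regs.regs[0] = (uint64_t)-EPERM;
        ptrace(PTRACE_SETREGSET, tracee, (void *)NT_PRSTATUS, &iov);
}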
index 4e01657..e546df0 100644 (file)
@@ -191,15 +191,12 @@ enum aarch32_map {
 #ifdef CONFIG_COMPAT_VDSO
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
-#else
-       AA32_MAP_SIGPAGE
 #endif
+       AA32_MAP_SIGPAGE
 };
 
 static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
 static struct page *aarch32_sig_page __ro_after_init;
-#endif
 
 static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
                .name = "[vdso]",
                .mremap = aarch32_vdso_mremap,
        },
-#else
+#endif /* CONFIG_COMPAT_VDSO */
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
        },
-#endif /* CONFIG_COMPAT_VDSO */
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
        return 0;
 }
 
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
-       int ret;
-
-       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
-       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
-       ret = __vdso_init(VDSO_ABI_AA32);
-       if (ret)
-               return ret;
-
-       return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
 {
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
-       int ret;
 
        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        flush_dcache_page(aarch32_sig_page);
+       return 0;
+}
 
-       ret = aarch32_alloc_kuser_vdso_page();
-       if (ret)
-               free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
 
-       return ret;
+       return __vdso_init(VDSO_ABI_AA32);
 }
 #endif /* CONFIG_COMPAT_VDSO */
 
 static int __init aarch32_alloc_vdso_pages(void)
 {
-       return __aarch32_alloc_vdso_pages();
+       int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+       ret = __aarch32_alloc_vdso_pages();
+       if (ret)
+               return ret;
+#endif
+
+       ret = aarch32_alloc_sigpage();
+       if (ret)
+               return ret;
+
+       return aarch32_alloc_kuser_vdso_page();
 }
 arch_initcall(aarch32_alloc_vdso_pages);
 
@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
        return PTR_ERR_OR_ZERO(ret);
 }
 
-#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
        unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
        if (IS_ERR(ret))
                goto out;
 
-       mm->context.vdso = (void *)addr;
+       mm->context.sigpage = (void *)addr;
 
 out:
        return PTR_ERR_OR_ZERO(ret);
 }
-#endif /* !CONFIG_COMPAT_VDSO */
 
 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                                       mm,
                                       bprm,
                                       uses_interp);
-#else
-       ret = aarch32_sigreturn_setup(mm);
+       if (ret)
+               goto out;
 #endif /* CONFIG_COMPAT_VDSO */
 
+       ret = aarch32_sigreturn_setup(mm);
 out:
        mmap_write_unlock(mm);
        return ret;
index 556d424..45d5cfe 100644 (file)
@@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # potential future proofing if we end up with internal calls to the exported
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-               -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv       \
+            -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n      \
+            $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING
 
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
 KBUILD_CFLAGS                  += $(DISABLE_LTO)
 KASAN_SANITIZE                 := n
 UBSAN_SANITIZE                 := n
index 620a3ef..0e18729 100644 (file)
 
        .text
 
+/*
+ * NOTE!!!  You may notice that all of the .cfi directives in this file have
+ * been commented out. This is because they have been shown to trigger segfaults
+ * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
+ * cleanup handlers during the thread cancellation dance. By omitting the
+ * directives, we trigger an arm64-specific fallback path in the unwinder which
+ * recognises the signal frame and restores many of the registers directly from
+ * the sigcontext. Re-enabling the cfi directives here therefore needs to be
+ * much more comprehensive to reduce the risk of further regressions.
+ */
+
 /* Ensure that the mysterious NOP can be associated with a function. */
-       .cfi_startproc
+//     .cfi_startproc
 
 /*
- * .cfi_signal_frame causes the corresponding Frame Description Entry in the
- * .eh_frame section to be annotated as a signal frame. This allows DWARF
- * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
- * unwinding out of the signal trampoline without the need for the mysterious
- * NOP.
+ * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
+ * the .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
+ * the next frame using the unmodified return address instead of subtracting 1,
+ * which may yield the wrong FDE.
  */
-       .cfi_signal_frame
+//     .cfi_signal_frame
 
 /*
  * Tell the unwinder where to locate the frame record linking back to the
- * interrupted context. We don't provide unwind info for registers other
- * than the frame pointer and the link register here; in practice, this
- * is sufficient for unwinding in C/C++ based runtimes and the values in
- * the sigcontext may have been modified by this point anyway. Debuggers
+ * interrupted context. We don't provide unwind info for registers other than
+ * the frame pointer and the link register here; in practice, this is likely to
+ * be insufficient for unwinding in C/C++ based runtimes, especially without a
+ * means to restore the stack pointer. Thankfully, unwinders and debuggers
  * already have baked-in strategies for attempting to unwind out of signals.
  */
-       .cfi_def_cfa    x29, 0
-       .cfi_offset     x29, 0 * 8
-       .cfi_offset     x30, 1 * 8
+//     .cfi_def_cfa    x29, 0
+//     .cfi_offset     x29, 0 * 8
+//     .cfi_offset     x30, 1 * 8
 
 /*
  * This mysterious NOP is required for some unwinders (e.g. libc++) that
        nop     // Mysterious NOP
 
 /*
- * GDB relies on being able to identify the sigreturn instruction sequence to
- * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
- * here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
- * this function is only ever called from a RET and so omitting the landing pad
- * is perfectly fine.
+ * GDB, libgcc and libunwind rely on being able to identify the sigreturn
+ * instruction sequence to unwind from signal handlers. We cannot, therefore,
+ * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
+ * unwinder. Thankfully, this function is only ever called from a RET and so
+ * omitting the landing pad is perfectly fine.
  */
 SYM_CODE_START(__kernel_rt_sigreturn)
+//     PLEASE DO NOT MODIFY
        mov     x8, #__NR_rt_sigreturn
+//     PLEASE DO NOT MODIFY
        svc     #0
-       .cfi_endproc
+//     PLEASE DO NOT MODIFY
+//     .cfi_endproc
 SYM_CODE_END(__kernel_rt_sigreturn)
 
 emit_aarch64_feature_1_and
index 7ea1e82..d88148b 100644 (file)
@@ -140,7 +140,6 @@ hostprogs := $(munge)
 
 c-obj-vdso := note.o
 c-obj-vdso-gettimeofday := vgettimeofday.o
-asm-obj-vdso := sigreturn.o
 
 ifneq ($(c-gettimeofday-y),)
 VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
deleted file mode 100644 (file)
index b009106..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file provides both A32 and T32 versions, in accordance with the
- * arm sigreturn code.
- *
- * Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to
- * understand some of the craziness in here.
- *
- * Copyright (C) 2018 ARM Limited
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
-       .text
-
-       .arm
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_sigreturn_arm)
-       mov r7, #__NR_compat_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_sigreturn_arm)
-
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_rt_sigreturn_arm)
-       mov r7, #__NR_compat_rt_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_arm)
-
-       .thumb
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_sigreturn_thumb)
-       mov r7, #__NR_compat_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_sigreturn_thumb)
-
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_rt_sigreturn_thumb)
-       mov r7, #__NR_compat_rt_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_thumb)
index a394492..337d035 100644 (file)
@@ -64,19 +64,7 @@ VERSION
                __vdso_clock_gettime;
                __vdso_gettimeofday;
                __vdso_clock_getres;
-               __kernel_sigreturn_arm;
-               __kernel_sigreturn_thumb;
-               __kernel_rt_sigreturn_arm;
-               __kernel_rt_sigreturn_thumb;
                __vdso_clock_gettime64;
        local: *;
        };
 }
-
-/*
- * Make the sigreturn code visible to the kernel.
- */
-VDSO_compat_sigreturn_arm      = __kernel_sigreturn_arm;
-VDSO_compat_sigreturn_thumb    = __kernel_sigreturn_thumb;
-VDSO_compat_rt_sigreturn_arm   = __kernel_rt_sigreturn_arm;
-VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
index 6827da7..5423ffe 100644 (file)
@@ -165,9 +165,6 @@ SECTIONS
                *(.altinstructions)
                __alt_instructions_end = .;
        }
-       .altinstr_replacement : {
-               *(.altinstr_replacement)
-       }
 
        . = ALIGN(SEGMENT_ALIGN);
        __inittext_end = .;
index 6e6ed55..e76c0e8 100644 (file)
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:     cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f
-reset:
+
        /*
-        * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-        * case we coming via HVC_SOFT_RESTART.
+        * Set the HVC_RESET_VECTORS return code before entering the common
+        * path so that we do not clobber x0-x2 in case we are coming via
+        * HVC_SOFT_RESTART.
         */
+       mov     x0, xzr
+reset:
+       /* Reset kvm back to the hyp stub. */
        mrs     x5, sctlr_el2
        mov_q   x6, SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
-       mov     x0, xzr
        eret
 
 1:     /* Bad stub call */
index b5ae3a5..3c22416 100644 (file)
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
        if (!has_vhe())
                return;
 
+       preempt_disable();
        host = this_cpu_ptr(&kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;
 
        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
+       preempt_enable();
 }
 
 /*
index 1e0f4c2..f7b52ce 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
        return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+       return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
        int ret = 0;
        int idx;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
        u64 __user *user = (u64 __user *)attr->addr;
        u64 ipa;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
        switch (attr->attr) {
        case KVM_ARM_VCPU_PVTIME_IPA:
-               return 0;
+               if (kvm_arm_pvtime_supported())
+                       return 0;
        }
        return -ENXIO;
 }
index d3b2090..6ed36be 100644 (file)
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       int ret = -EINVAL;
+       int ret;
        bool loaded;
        u32 pstate;
 
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-               if (kvm_vcpu_enable_ptrauth(vcpu))
+               if (kvm_vcpu_enable_ptrauth(vcpu)) {
+                       ret = -EINVAL;
                        goto out;
+               }
        }
 
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+                               ret = -EINVAL;
                                goto out;
+                       }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index 27ac833..b5fa73c 100644 (file)
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
            !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);
 
+       /*
+        * The v4.1 doorbell can fire concurrently with the vPE being
+        * made non-resident. Ensure we only update pending_last
+        * *after* the non-residency sequence has completed.
+        */
+       raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+       raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 
index e779b19..f66f4b1 100644 (file)
@@ -138,7 +138,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
-       memblock_add(memory_start, memory_end - memory_start);
+       memblock_add(_rambase, memory_end - _rambase);
+       memblock_reserve(_rambase, memory_start - _rambase);
 
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
index 29f4792..7d04210 100644 (file)
@@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
-       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+       memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
index 8d22828..bc72304 100644 (file)
@@ -92,7 +92,7 @@
                        "MIC1N", "Built-in Mic";
                simple-audio-card,pin-switches = "Speaker", "Headphones";
 
-               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>;
+               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
                simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
 
                simple-audio-card,bitclock-master = <&dai_codec>;
index c628747..7dd4a80 100644 (file)
                                                                \
        /*                                                      \
         * We can't unroll if the number of iterations isn't    \
-        * compile-time constant. Unfortunately GCC versions    \
-        * up until 4.6 tend to miss obvious constants & cause  \
+        * compile-time constant. Unfortunately clang versions  \
+        * up until 8.0 tend to miss obvious constants & cause  \
         * this check to fail, even though they go on to        \
         * generate reasonable code for the switch statement,   \
         * so we skip the sanity check for those compilers.     \
         */                                                     \
-       BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 ||            \
-                     CONFIG_CLANG_VERSION >= 80000) &&         \
-                    !__builtin_constant_p(times));             \
+       BUILD_BUG_ON(!__builtin_constant_p(times));             \
                                                                \
        switch (times) {                                        \
        case 32: fn(__VA_ARGS__); /* fall through */            \
index 7c32c95..f655af6 100644 (file)
@@ -723,12 +723,14 @@ static int simulate_loongson3_cpucfg(struct pt_regs *regs,
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
                /* Do not emulate on unsupported core models. */
-               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
+               preempt_disable();
+               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+                       preempt_enable();
                        return -1;
-
+               }
                regs->regs[rd] = loongson3_cpucfg_read_synthesized(
                        &current_cpu_data, sel);
-
+               preempt_enable();
                return 0;
        }
 
@@ -2169,6 +2171,7 @@ static void configure_status(void)
 
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);
+       back_to_back_c0_hazard();
 }
 
 unsigned int hwrena;
index 5ae82d9..d242300 100644 (file)
@@ -1722,6 +1722,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.gprs[rt], *(u32 *)data);
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case sdl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -1815,6 +1816,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u64 *)data);
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case sdc2_op:
@@ -2002,6 +2004,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                }
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case ldl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -2073,6 +2076,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                        break;
                }
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case ldc2_op:
index 521bd58..666d335 100644 (file)
@@ -67,8 +67,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT("vz_ghfc", vz_ghfc_exits),
        VCPU_STAT("vz_gpa", vz_gpa_exits),
        VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
        VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
 #endif
+#endif
        VCPU_STAT("halt_successful_poll", halt_successful_poll),
        VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
        VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
index aa37545..b103420 100644 (file)
@@ -514,8 +514,8 @@ void __init ltq_soc_init(void)
                clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
                               PMU_PPE_DP | PMU_PPE_TC);
                clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -538,8 +538,8 @@ void __init ltq_soc_init(void)
                                PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
                                PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
                                PMU_PPE_QSB | PMU_PPE_TOP);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
index 3b2552f..5958217 100644 (file)
@@ -627,9 +627,10 @@ static int bridge_probe(struct platform_device *pdev)
                return -ENOMEM;
        domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
                                             &bridge_domain_ops, NULL);
-       irq_domain_free_fwnode(fn);
-       if (!domain)
+       if (!domain) {
+               irq_domain_free_fwnode(fn);
                return -ENOMEM;
+       }
 
        pci_set_flags(PCI_PROBE_ONLY);
 
index c152a68..3457276 100644 (file)
@@ -74,8 +74,11 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size)
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
+       mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
                        NULL);
+       mmap_read_unlock(&init_mm);
+
        if (error)
                return ERR_PTR(error);
        return cpu_addr;
@@ -85,9 +88,11 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
        unsigned long va = (unsigned long)cpu_addr;
 
+       mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                        &clear_nocache_walk_ops, NULL));
+       mmap_read_unlock(&init_mm);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
index 965b1f3..b0c70a3 100644 (file)
@@ -77,6 +77,8 @@ struct coprocessor_completion_block {
 #define CSB_CC_CHAIN           (37)
 #define CSB_CC_SEQUENCE                (38)
 #define CSB_CC_HW              (39)
+/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
+#define        CSB_CC_FAULT_ADDRESS    (250)
 
 #define CSB_SIZE               (0x10)
 #define CSB_ALIGN              CSB_SIZE
index fa08069..0fc8bad 100644 (file)
@@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100)
 INT_DEFINE_BEGIN(denorm_exception)
        IVEC=0x1500
        IHSRR=1
-       IBRANCH_COMMON=0
+       IBRANCH_TO_COMMON=0
        IKVM_REAL=1
 INT_DEFINE_END(denorm_exception)
 
index 2168372..74da65a 100644 (file)
@@ -87,7 +87,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
         * This is very early in boot, so no harm done if the kernel crashes at
         * this point.
         */
-       BUG_ON(shared_lppaca_size >= shared_lppaca_total_size);
+       BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
 
        return ptr;
 }
index e738ea6..6a73714 100644 (file)
@@ -40,7 +40,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
        /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
        if (kvmhv_on_pseries())
                return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
-                                         __pa(to), __pa(from), n);
+                                         (to != NULL) ? __pa(to): 0,
+                                         (from != NULL) ? __pa(from): 0, n);
 
        quadrant = 1;
        if (!pid)
index 1199fc2..d174106 100644 (file)
@@ -353,16 +353,15 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
        int pkey_shift;
        u64 amr;
 
-       if (!is_pkey_enabled(pkey))
-               return true;
-
        pkey_shift = pkeyshift(pkey);
-       if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
-               return true;
+       if (execute)
+               return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
+
+       amr = read_amr();
+       if (write)
+               return !(amr & (AMR_WR_BIT << pkey_shift));
 
-       amr = read_amr(); /* Delay reading amr until absolutely needed */
-       return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
-               (write &&  !(amr & (AMR_WR_BIT << pkey_shift))));
+       return !(amr & (AMR_RD_BIT << pkey_shift));
 }
 
 bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
index 4a75f2d..bce0e53 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/memblock.h>
 #include <linux/libfdt.h>
 #include <linux/crash_core.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
 #include <asm/kdump.h>
index 266a6ca..3d21fce 100644 (file)
@@ -79,7 +79,7 @@ static void update_csb(struct vas_window *window,
        csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
 
        memset(&csb, 0, sizeof(csb));
-       csb.cc = CSB_CC_TRANSLATION;
+       csb.cc = CSB_CC_FAULT_ADDRESS;
        csb.ce = CSB_CE_TERMINATION;
        csb.cs = 0;
        csb.count = 0;
index 128192e..3230c1d 100644 (file)
@@ -23,6 +23,8 @@ config RISCV
        select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+       select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+       select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
index 3f1737f..d0e24aa 100644 (file)
@@ -58,8 +58,16 @@ do {                                                                 \
  * The AQ/RL pair provides a RCpc critical section, but there's not really any
  * way we can take advantage of that here because the ordering is only enforced
  * on that one lock.  Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptible regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart.  While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary.  In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
  */
-#define smp_mb__after_spinlock()       RISCV_FENCE(rw,rw)
+#define smp_mb__after_spinlock()       RISCV_FENCE(iorw,iorw)
 
 #include <asm-generic/barrier.h>
 
index 041b45f..0934211 100644 (file)
@@ -3,8 +3,7 @@
 #ifndef __ASM_GDB_XML_H_
 #define __ASM_GDB_XML_H_
 
-#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
-static const char riscv_gdb_stub_feature[64] =
+const char riscv_gdb_stub_feature[64] =
                        "PacketSize=800;qXfer:features:read+;";
 
 static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
index 8177a45..46677da 100644 (file)
@@ -19,7 +19,6 @@
 
 #ifndef        __ASSEMBLY__
 
-extern int kgdb_has_hit_break(unsigned long addr);
 extern unsigned long kgdb_compiled_break;
 
 static inline void arch_kgdb_breakpoint(void)
@@ -106,7 +105,9 @@ static inline void arch_kgdb_breakpoint(void)
 #define DBG_REG_BADADDR_OFF 34
 #define DBG_REG_CAUSE_OFF 35
 
-#include <asm/gdb_xml.h>
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
 
 #endif
 #endif
index 1dd12a0..464a2bb 100644 (file)
 #include <linux/const.h>
 
 /* thread information allocation */
+#ifdef CONFIG_64BIT
+#define THREAD_SIZE_ORDER      (2)
+#else
 #define THREAD_SIZE_ORDER      (1)
+#endif
 #define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
 
 #ifndef __ASSEMBLY__
index c3275f4..963ed7e 100644 (file)
@@ -44,18 +44,18 @@ DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
 DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
 DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
 
-int decode_register_index(unsigned long opcode, int offset)
+static int decode_register_index(unsigned long opcode, int offset)
 {
        return (opcode >> offset) & 0x1F;
 }
 
-int decode_register_index_short(unsigned long opcode, int offset)
+static int decode_register_index_short(unsigned long opcode, int offset)
 {
        return ((opcode >> offset) & 0x7) + 8;
 }
 
 /* Calculate the new address for after a step */
-int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
 {
        unsigned long pc = regs->epc;
        unsigned long *regs_ptr = (unsigned long *)regs;
@@ -136,7 +136,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
        return 0;
 }
 
-int do_single_step(struct pt_regs *regs)
+static int do_single_step(struct pt_regs *regs)
 {
        /* Determine where the target instruction will send us to */
        unsigned long addr = 0;
@@ -320,7 +320,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
        return err;
 }
 
-int kgdb_riscv_kgdbbreak(unsigned long addr)
+static int kgdb_riscv_kgdbbreak(unsigned long addr)
 {
        if (stepped_address == addr)
                return KGDB_SW_SINGLE_STEP;
index 38ba55b..e4c7c2c 100644 (file)
@@ -17,7 +17,7 @@ vdso-syms += flush_icache
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
 ifneq ($(c-gettimeofday-y),)
-  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+  CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
 endif
 
 # Build rules
@@ -27,6 +27,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 obj-y += vdso.o vdso-syms.o
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
+# Disable -pg to prevent inserting call sites
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
 
index d264943..cc0d806 100644 (file)
@@ -9,16 +9,22 @@
 #include <linux/time.h>
 #include <linux/types.h>
 
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
 int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 {
        return __cvdso_clock_gettime(clock, ts);
 }
 
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
 int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
 {
        return __cvdso_clock_getres(clock_id, res);
index 46038bc..0cf9a82 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -14,7 +15,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -31,9 +31,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -51,14 +51,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=y
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -77,6 +74,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -96,7 +95,6 @@ CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -130,6 +128,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -144,6 +143,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -151,7 +151,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -317,6 +320,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -341,6 +345,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -364,6 +369,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -374,6 +380,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -435,6 +442,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -448,6 +456,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -481,7 +491,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -514,6 +523,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -561,6 +571,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -608,6 +620,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -650,8 +663,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_FORTIFY_SOURCE=y
@@ -675,8 +688,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -685,6 +701,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -701,6 +718,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -719,6 +737,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -774,6 +795,7 @@ CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -786,7 +808,9 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_RCU_TRACE is not set
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_IRQSOFF_TRACER=y
@@ -808,10 +832,12 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_MIN_HEAP=y
 CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
index 7cd0648..5df9759 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -13,7 +14,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -30,9 +30,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -41,7 +41,6 @@ CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
-# CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
@@ -51,14 +50,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -74,6 +70,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -91,7 +89,6 @@ CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -125,6 +122,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -139,6 +137,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -146,7 +145,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -311,6 +313,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -335,6 +338,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -358,6 +362,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -368,6 +373,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
@@ -430,6 +436,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -444,6 +451,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -477,7 +486,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -510,6 +518,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -557,6 +566,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -600,6 +611,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -642,8 +654,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
@@ -667,8 +679,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -678,6 +693,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -694,6 +710,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -712,6 +729,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -725,6 +745,7 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CORDIC=m
+CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
@@ -739,10 +760,12 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
index 20c51e5..4091c50 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_IBM_PARTITION=y
 # CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
+# CONFIG_ETHTOOL_NETLINK is not set
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
@@ -55,6 +56,8 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_MONWRITER is not set
 # CONFIG_S390_VMUR is not set
 # CONFIG_HID is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -62,7 +65,9 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_ZLIB_DFLTCC is not set
 CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index cee3cb6..6ea0820 100644 (file)
 #define KVM_USER_MEM_SLOTS 32
 
 /*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
  */
 #define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
 #define KVM_HALT_POLL_NS_DEFAULT 50000
 
 /* s390-specific vcpu->requests bit members */
index 6364460..263075a 100644 (file)
@@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
        if (!areas)
                goto fail_malloc_areas;
        for (i = 0; i < nr_areas; i++) {
+               /* __GFP_NOWARN to avoid a user-triggerable WARN; allocation failures are handled */
                areas[i] = kmalloc_array(pages_per_area,
                                         sizeof(debug_entry_t *),
-                                        GFP_KERNEL);
+                                        GFP_KERNEL | __GFP_NOWARN);
                if (!areas[i])
                        goto fail_malloc_areas2;
                for (j = 0; j < pages_per_area; j++) {
index cd241ee..0782772 100644 (file)
@@ -170,6 +170,8 @@ static noinline __init void setup_lowcore_early(void)
        psw_t psw;
 
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+       if (IS_ENABLED(CONFIG_KASAN))
+               psw.mask |= PSW_MASK_DAT;
        psw.addr = (unsigned long) s390_base_ext_handler;
        S390_lowcore.external_new_psw = psw;
        psw.addr = (unsigned long) s390_base_pgm_handler;
index 496f74d..969b35b 100644 (file)
@@ -378,9 +378,9 @@ ENTRY(system_call)
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lg      %r12,__LC_CURRENT
-       lghi    %r13,__TASK_thread
        lghi    %r14,_PIF_SYSCALL
 .Lsysc_per:
+       lghi    %r13,__TASK_thread
        lg      %r15,__LC_KERNEL_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
        UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
index 85a711d..4f9e462 100644 (file)
@@ -881,12 +881,21 @@ out:
        return err;
 }
 
+static bool is_callchain_event(struct perf_event *event)
+{
+       u64 sample_type = event->attr.sample_type;
+
+       return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+                             PERF_SAMPLE_STACK_USER);
+}
+
 static int cpumsf_pmu_event_init(struct perf_event *event)
 {
        int err;
 
        /* No support for taken branch sampling */
-       if (has_branch_stack(event))
+       /* No support for callchain, stacks and registers */
+       if (has_branch_stack(event) || is_callchain_event(event))
                return -EOPNOTSUPP;
 
        switch (event->attr.type) {
index 5853c98..07aa15b 100644 (file)
@@ -1100,6 +1100,7 @@ void __init setup_arch(char **cmdline_p)
        if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
                nospec_auto_detect();
 
+       jump_label_init();
        parse_early_param();
 #ifdef CONFIG_CRASH_DUMP
        /* Deactivate elfcorehdr= kernel parameter */
index 82df06d..3b5a4d2 100644 (file)
@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
                                             _PAGE_YOUNG);
 #ifdef CONFIG_MEM_SOFT_DIRTY
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
-                                            _PAGE_DIRTY);
+                                            _PAGE_SOFT_DIRTY);
 #endif
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
                                             _PAGE_NOEXEC);
index 22a0be6..1d17413 100644 (file)
@@ -62,11 +62,15 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
        long copied;
 
        spin_lock_irqsave(&s390_kernel_write_lock, flags);
-       while (size) {
-               copied = s390_kernel_write_odd(tmp, src, size);
-               tmp += copied;
-               src += copied;
-               size -= copied;
+       if (!(flags & PSW_MASK_DAT)) {
+               memcpy(dst, src, size);
+       } else {
+               while (size) {
+                       copied = s390_kernel_write_odd(tmp, src, size);
+                       tmp += copied;
+                       src += copied;
+                       size -= copied;
+               }
        }
        spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 
index 08e1d61..fdebd28 100644 (file)
@@ -94,7 +94,18 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                }
                zdev->fh = ccdf->fh;
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
-               zpci_create_device(zdev);
+               ret = zpci_enable_device(zdev);
+               if (ret)
+                       break;
+
+               pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+               if (!pdev)
+                       break;
+
+               pci_bus_add_device(pdev);
+               pci_lock_rescan_remove();
+               pci_bus_add_devices(zdev->zbus->bus);
+               pci_unlock_rescan_remove();
                break;
        case 0x0302: /* Reserved -> Standby */
                if (!zdev) {
index 6a0cc52..883da0a 100644 (file)
@@ -67,7 +67,7 @@ config X86
        select ARCH_HAS_FILTER_PGPROT
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
-       select ARCH_HAS_KCOV                    if X86_64
+       select ARCH_HAS_KCOV                    if X86_64 && STACK_VALIDATION
        select ARCH_HAS_MEM_ENCRYPT
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
        select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
index 7619742..5a828fd 100644 (file)
@@ -90,8 +90,8 @@ endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
 
-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
+efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # The compressed kernel is built with -fPIC/-fPIE so that a boot loader
 # can place it anywhere in memory and it will still run. However, since
@@ -115,7 +115,7 @@ endef
 quiet_cmd_check-and-link-vmlinux = LD      $@
       cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
 
-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
        $(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
index e821a7d..97d37f0 100644 (file)
@@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
         * We place all of the values on our mini stack so lret can
         * be used to perform that far jump.
         */
-       pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
        movl    efi32_boot_args(%ebp), %edi
@@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
        movl    efi32_boot_args+8(%ebp), %edx   // saved bootparams pointer
        cmpl    $0, %edx
        jnz     1f
+       /*
+        * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+        * shadow space on the stack even if all arguments are passed in
+        * registers. We also need an additional 8 bytes for the space that
+        * would be occupied by the return address, and this also results in
+        * the correct stack alignment for entry.
+        */
+       subl    $40, %esp
        leal    efi_pe_entry(%ebp), %eax
        movl    %edi, %ecx                      // MS calling convention
        movl    %esi, %edx
 1:
 #endif
+       pushl   $__KERNEL_CS
        pushl   %eax
 
        /* Enter paged protected Mode, activating Long Mode */
@@ -784,6 +792,7 @@ SYM_DATA_LOCAL(boot_heap,   .fill BOOT_HEAP_SIZE, 1, 0)
 
 SYM_DATA_START_LOCAL(boot_stack)
        .fill BOOT_STACK_SIZE, 1, 0
+       .balign 16
 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
 
 /*
index b7a5790..08bf95d 100644 (file)
@@ -7,12 +7,20 @@ KASAN_SANITIZE := n
 UBSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
-CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
-CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
-CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_common.o         = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_syscall_64.o     = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_syscall_32.o     = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_syscall_x32.o    = $(CC_FLAGS_FTRACE)
+
+CFLAGS_common.o                        += -fno-stack-protector
+CFLAGS_syscall_64.o            += -fno-stack-protector
+CFLAGS_syscall_32.o            += -fno-stack-protector
+CFLAGS_syscall_x32.o           += -fno-stack-protector
 
 CFLAGS_syscall_64.o            += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_32.o            += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_x32.o           += $(call cc-option,-Wno-override-init,)
+
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y                          += common.o
 
index bd3f141..f092884 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+/* Check that the stack and regs on entry from user mode are sane. */
+static noinstr void check_user_regs(struct pt_regs *regs)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+               /*
+                * Make sure that the entry code gave us a sensible EFLAGS
+                * register.  Native because we want to check the actual CPU
+                * state, not the interrupt state as imagined by Xen.
+                */
+               unsigned long flags = native_save_fl();
+               WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+                                     X86_EFLAGS_NT));
+
+               /* We think we came from user mode. Make sure pt_regs agrees. */
+               WARN_ON_ONCE(!user_mode(regs));
+
+               /*
+                * All entries from user mode (except #DF) should be on the
+                * normal thread stack and should have user pt_regs in the
+                * correct location.
+                */
+               WARN_ON_ONCE(!on_thread_stack());
+               WARN_ON_ONCE(regs != task_pt_regs(current));
+       }
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /**
  * enter_from_user_mode - Establish state when coming from user mode
@@ -127,9 +153,6 @@ static long syscall_trace_enter(struct pt_regs *regs)
        unsigned long ret = 0;
        u32 work;
 
-       if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
-               BUG_ON(regs != task_pt_regs(current));
-
        work = READ_ONCE(ti->flags);
 
        if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
@@ -271,7 +294,7 @@ static void __prepare_exit_to_usermode(struct pt_regs *regs)
 #endif
 }
 
-__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
+static noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
 {
        instrumentation_begin();
        __prepare_exit_to_usermode(regs);
@@ -346,6 +369,8 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
        struct thread_info *ti;
 
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -409,6 +434,8 @@ static void do_syscall_32_irqs_on(struct pt_regs *regs)
 /* Handles int $0x80 */
 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -460,6 +487,8 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                                        vdso_image_32.sym_int80_landing_pad;
        bool success;
 
+       check_user_regs(regs);
+
        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
@@ -510,6 +539,18 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
 #endif
 }
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
+{
+       /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+       regs->sp = regs->bp;
+
+       /* SYSENTER clobbers EFLAGS.IF.  Assume it was set in usermode. */
+       regs->flags |= X86_EFLAGS_IF;
+
+       return do_fast_syscall_32(regs);
+}
 #endif
 
 SYSCALL_DEFINE0(ni_syscall)
@@ -553,6 +594,7 @@ SYSCALL_DEFINE0(ni_syscall)
 bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
+               check_user_regs(regs);
                enter_from_user_mode();
                return false;
        }
@@ -686,6 +728,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
  */
 void noinstr idtentry_enter_user(struct pt_regs *regs)
 {
+       check_user_regs(regs);
        enter_from_user_mode();
 }
 
index 024d7d2..2d0bd5d 100644 (file)
@@ -933,9 +933,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
 .Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
-       pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
+       pushl   $0                      /* pt_regs->sp (placeholder) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%esp)  /* Fix IF */
        pushl   $__USER_CS              /* pt_regs->cs */
        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
        pushl   %eax                    /* pt_regs->orig_ax */
@@ -965,7 +964,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
 .Lsysenter_flags_fixed:
 
        movl    %esp, %eax
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
                    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
index 0f974ae..541fdaf 100644 (file)
@@ -57,29 +57,30 @@ SYM_CODE_START(entry_SYSENTER_compat)
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-       /*
-        * User tracing code (ptrace or signal handlers) might assume that
-        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
-        * syscall.  Just in case the high bits are nonzero, zero-extend
-        * the syscall number.  (This could almost certainly be deleted
-        * with no ill effects.)
-        */
-       movl    %eax, %eax
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER32_DS            /* pt_regs->ss */
-       pushq   %rbp                    /* pt_regs->sp (stashed in bp) */
+       pushq   $0                      /* pt_regs->sp = 0 (placeholder) */
 
        /*
         * Push flags.  This is nasty.  First, interrupts are currently
-        * off, but we need pt_regs->flags to have IF set.  Second, even
-        * if TF was set when SYSENTER started, it's clear by now.  We fix
-        * that later using TIF_SINGLESTEP.
+        * off, but we need pt_regs->flags to have IF set.  Second, if TF
+        * was set in usermode, it's still set, and we're singlestepping
+        * through this code.  do_SYSENTER_32() will fix up IF.
         */
        pushfq                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%rsp)  /* Fix saved flags */
        pushq   $__USER32_CS            /* pt_regs->cs */
        pushq   $0                      /* pt_regs->ip = 0 (placeholder) */
+SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
+       /*
+        * User tracing code (ptrace or signal handlers) might assume that
+        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+        * syscall.  Just in case the high bits are nonzero, zero-extend
+        * the syscall number.  (This could almost certainly be deleted
+        * with no ill effects.)
+        */
+       movl    %eax, %eax
+
        pushq   %rax                    /* pt_regs->orig_ax */
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
@@ -135,7 +136,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
 .Lsysenter_flags_fixed:
 
        movq    %rsp, %rdi
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
                    "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
index 12c42eb..9933c0e 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y                                  += core.o probe.o
-obj-$(PERF_EVENTS_INTEL_RAPL)          += rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += rapl.o
 obj-y                                  += amd/
 obj-$(CONFIG_X86_LOCAL_APIC)            += msr.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += intel/
index a54c6a4..6035df1 100644 (file)
@@ -375,7 +375,10 @@ void __init hyperv_init(void)
        guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
-       hv_hypercall_pg = vmalloc_exec(PAGE_SIZE);
+       hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
+                       VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+                       __builtin_return_address(0));
        if (hv_hypercall_pg == NULL) {
                wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
                goto remove_cpuhp_state;
index 35460fe..0367efd 100644 (file)
@@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
        return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
-static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
-       /*
-        * Because this is a plain access, we need to disable KCSAN here to
-        * avoid double instrumentation via instrumented bitops.
-        */
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
index fb34ff6..0281895 100644 (file)
@@ -75,6 +75,12 @@ do {                                                         \
        unreachable();                                          \
 } while (0)
 
+/*
+ * This instrumentation_begin() is strictly speaking incorrect; but it
+ * suppresses the complaints from WARN()s in noinstr code. If such a WARN()
+ * were to trigger, we'd rather wreck the machine in an attempt to get the
+ * message out than not know about it.
+ */
 #define __WARN_FLAGS(flags)                                    \
 do {                                                           \
        instrumentation_begin();                                \
index dd17c2d..da78ccb 100644 (file)
@@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
        return false;
 }
 #endif
+#ifdef CONFIG_IA32_FEAT_CTL
+void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
+#else
+static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+#endif
 #endif /* _ASM_X86_CPU_H */
index 6722ffc..3afa990 100644 (file)
@@ -11,5 +11,23 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
+/*
+ * NMI and MCE exceptions need cpu_is_offline() _really_ early;
+ * provide an arch_ special for them to avoid instrumentation.
+ */
+#if NR_CPUS > 1
+static __always_inline bool arch_cpu_online(int cpu)
+{
+       return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
+}
+#else
+static __always_inline bool arch_cpu_online(int cpu)
+{
+       return cpu == 0;
+}
+#endif
+
+#define arch_cpu_is_offline(cpu)       unlikely(!arch_cpu_online(cpu))
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
index 42159f4..845e748 100644 (file)
@@ -623,6 +623,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
  * MXCSR and XCR definitions:
  */
 
+static inline void ldmxcsr(u32 mxcsr)
+{
+       asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
 extern unsigned int mxcsr_feature_mask;
 
 #define XCR_XFEATURE_ENABLED_MASK      0x00000000
index cf51c50..80d3b30 100644 (file)
@@ -353,10 +353,6 @@ static __always_inline void __##func(struct pt_regs *regs)
 
 #else  /* CONFIG_X86_64 */
 
-/* Maps to a regular IDTENTRY on 32bit for now */
-# define DECLARE_IDTENTRY_IST          DECLARE_IDTENTRY
-# define DEFINE_IDTENTRY_IST           DEFINE_IDTENTRY
-
 /**
  * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
  * @vector:    Vector number (ignored for C)
@@ -387,28 +383,18 @@ __visible noinstr void func(struct pt_regs *regs,                 \
 #endif /* !CONFIG_X86_64 */
 
 /* C-Code mapping */
+#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
+
+#ifdef CONFIG_X86_64
 #define DECLARE_IDTENTRY_MCE           DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE            DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE_USER       DEFINE_IDTENTRY_NOIST
 
-#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
-#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
-
 #define DECLARE_IDTENTRY_DEBUG         DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG          DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG_USER     DEFINE_IDTENTRY_NOIST
-
-/**
- * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
- * @vector:    Vector number (ignored for C)
- * @func:      Function name of the entry point
- *
- * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
- * indirection magic.
- */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       asmlinkage void xen_asm_exc_xen##func(void);                    \
-       asmlinkage void asm_exc_xen##func(void)
+#endif
 
 #else /* !__ASSEMBLY__ */
 
@@ -455,9 +441,6 @@ __visible noinstr void func(struct pt_regs *regs,                   \
 # define DECLARE_IDTENTRY_MCE(vector, func)                            \
        DECLARE_IDTENTRY(vector, func)
 
-# define DECLARE_IDTENTRY_DEBUG(vector, func)                          \
-       DECLARE_IDTENTRY(vector, func)
-
 /* No ASM emitted for DF as this goes through a C shim */
 # define DECLARE_IDTENTRY_DF(vector, func)
 
@@ -469,10 +452,6 @@ __visible noinstr void func(struct pt_regs *regs,                  \
 /* No ASM code emitted for NMI */
 #define DECLARE_IDTENTRY_NMI(vector, func)
 
-/* XEN NMI and DB wrapper */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       idtentry vector asm_exc_xen##func exc_##func has_error_code=0
-
 /*
  * ASM code to emit the common vector entry stubs where each stub is
  * packed into 8 bytes.
@@ -490,16 +469,15 @@ __visible noinstr void func(struct pt_regs *regs,                 \
        .align 8
 SYM_CODE_START(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
-    pos = .
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        UNWIND_HINT_IRET_REGS
+0 :
        .byte   0x6a, vector
        jmp     asm_common_interrupt
        nop
        /* Ensure that the above is 8 bytes max */
-       . = pos + 8
-    pos=pos+8
-    vector=vector+1
+       . = 0b + 8
+       vector = vector+1
     .endr
 SYM_CODE_END(irq_entries_start)
 
@@ -507,16 +485,15 @@ SYM_CODE_END(irq_entries_start)
        .align 8
 SYM_CODE_START(spurious_entries_start)
     vector=FIRST_SYSTEM_VECTOR
-    pos = .
     .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
        UNWIND_HINT_IRET_REGS
+0 :
        .byte   0x6a, vector
        jmp     asm_spurious_interrupt
        nop
        /* Ensure that the above is 8 bytes max */
-       . = pos + 8
-    pos=pos+8
-    vector=vector+1
+       . = 0b + 8
+       vector = vector+1
     .endr
 SYM_CODE_END(spurious_entries_start)
 #endif
@@ -565,16 +542,28 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_BP,         exc_int3);
 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,    exc_page_fault);
 
 #ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_MCE(X86_TRAP_MC,      exc_machine_check);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC,      exc_machine_check);
+#endif
 #endif
 
 /* NMI */
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
-DECLARE_IDTENTRY_XEN(X86_TRAP_NMI,     nmi);
+#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
+#endif
 
 /* #DB */
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB,    exc_debug);
-DECLARE_IDTENTRY_XEN(X86_TRAP_DB,      debug);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      exc_debug);
+#endif
+#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      xenpv_exc_debug);
+#endif
 
 /* #DF */
 DECLARE_IDTENTRY_DF(X86_TRAP_DF,       exc_double_fault);
@@ -635,8 +624,8 @@ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR,  sysvec_kvm_posted_intr_nested
 
 #if IS_ENABLED(CONFIG_HYPERV)
 DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,    sysvec_hyperv_callback);
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR,     sysvec_hyperv_reenlightenment);
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR,     sysvec_hyperv_stimer0);
+DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
 #endif
 
 #if IS_ENABLED(CONFIG_ACRN_GUEST)
index ac1a99f..7f080f5 100644 (file)
@@ -19,12 +19,28 @@ struct task_struct;
 void io_bitmap_share(struct task_struct *tsk);
 void io_bitmap_exit(struct task_struct *tsk);
 
+static inline void native_tss_invalidate_io_bitmap(void)
+{
+       /*
+        * Invalidate the I/O bitmap by moving io_bitmap_base outside the
+        * TSS limit so any subsequent I/O access from user space will
+        * trigger a #GP.
+        *
+        * This is correct even when VMEXIT rewrites the TSS limit
+        * to 0x67 as the only requirement is that the base points
+        * outside the limit.
+        */
+       this_cpu_write(cpu_tss_rw.x86_tss.io_bitmap_base,
+                      IO_BITMAP_OFFSET_INVALID);
+}
+
 void native_tss_update_io_bitmap(void);
 
 #ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define tss_update_io_bitmap native_tss_update_io_bitmap
+#define tss_invalidate_io_bitmap native_tss_invalidate_io_bitmap
 #endif
 
 #else
index f8998e9..be5363b 100644 (file)
@@ -943,7 +943,7 @@ struct kvm_arch {
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;
-       bool apic_map_dirty;
+       atomic_t apic_map_dirty;
 
        bool apic_access_page_done;
        unsigned long apicv_inhibit_reasons;
@@ -1220,7 +1220,7 @@ struct kvm_x86_ops {
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
-       int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+       int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
index 73d997a..e039a93 100644 (file)
@@ -25,8 +25,6 @@
 #define TPAUSE_C01_STATE               1
 #define TPAUSE_C02_STATE               0
 
-u32 get_umwait_control_msr(void);
-
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
 {
index 5ca5d29..3d2afec 100644 (file)
@@ -302,6 +302,11 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 }
 
 #ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_invalidate_io_bitmap(void)
+{
+       PVOP_VCALL0(cpu.invalidate_io_bitmap);
+}
+
 static inline void tss_update_io_bitmap(void)
 {
        PVOP_VCALL0(cpu.update_io_bitmap);
index 732f62e..8dfcb25 100644 (file)
@@ -141,6 +141,7 @@ struct pv_cpu_ops {
        void (*load_sp0)(unsigned long sp0);
 
 #ifdef CONFIG_X86_IOPL_IOPERM
+       void (*invalidate_io_bitmap)(void);
        void (*update_io_bitmap)(void);
 #endif
 
index 2da1f95..816b31c 100644 (file)
@@ -194,6 +194,7 @@ enum page_cache_mode {
 #define _PAGE_TABLE_NOENC       (__PP|__RW|_USR|___A|   0|___D|   0|   0)
 #define _PAGE_TABLE             (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
 #define __PAGE_KERNEL_RO        (__PP|   0|   0|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_ROX       (__PP|   0|   0|___A|   0|___D|   0|___G)
 #define __PAGE_KERNEL_NOCACHE   (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
 #define __PAGE_KERNEL_VVAR      (__PP|   0|_USR|___A|__NX|___D|   0|___G)
 #define __PAGE_KERNEL_LARGE     (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
@@ -219,6 +220,7 @@ enum page_cache_mode {
 #define PAGE_KERNEL_RO         __pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
 #define PAGE_KERNEL_EXEC       __pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
 #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
+#define PAGE_KERNEL_ROX                __pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
 #define PAGE_KERNEL_NOCACHE    __pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
 #define PAGE_KERNEL_LARGE      __pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
index 42cd333..03b7c4c 100644 (file)
@@ -370,7 +370,7 @@ struct x86_hw_tss {
 #define IO_BITMAP_OFFSET_INVALID       (__KERNEL_TSS_LIMIT + 1)
 
 struct entry_stack {
-       unsigned long           words[64];
+       char    stack[PAGE_SIZE];
 };
 
 struct entry_stack_page {
index 17c5a03..0780f97 100644 (file)
@@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
 };
 
 struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
 };
 
 struct kvm_svm_nested_state_data {
index ce61e3e..81ffcfb 100644 (file)
@@ -2316,12 +2316,12 @@ static int mp_irqdomain_create(int ioapic)
        ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
                                                 (void *)(long)ioapic);
 
-       /* Release fw handle if it was allocated above */
-       if (!cfg->dev)
-               irq_domain_free_fwnode(fn);
-
-       if (!ip->irqdomain)
+       if (!ip->irqdomain) {
+               /* Release fw handle if it was allocated above */
+               if (!cfg->dev)
+                       irq_domain_free_fwnode(fn);
                return -ENOMEM;
+       }
 
        ip->irqdomain->parent = parent;
 
index 5cbaca5..c2b2911 100644 (file)
@@ -263,12 +263,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
                msi_default_domain =
                        pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
                                                  parent);
-               irq_domain_free_fwnode(fn);
        }
-       if (!msi_default_domain)
+       if (!msi_default_domain) {
+               irq_domain_free_fwnode(fn);
                pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
-       else
+       } else {
                msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
+       }
 }
 
 #ifdef CONFIG_IRQ_REMAP
@@ -301,7 +302,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
        if (!fn)
                return NULL;
        d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
-       irq_domain_free_fwnode(fn);
+       if (!d)
+               irq_domain_free_fwnode(fn);
        return d;
 }
 #endif
@@ -364,7 +366,8 @@ static struct irq_domain *dmar_get_irq_domain(void)
        if (fn) {
                dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
                                                    x86_vector_domain);
-               irq_domain_free_fwnode(fn);
+               if (!dmar_domain)
+                       irq_domain_free_fwnode(fn);
        }
 out:
        mutex_unlock(&dmar_lock);
@@ -489,7 +492,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
        }
 
        d = msi_create_irq_domain(fn, domain_info, parent);
-       irq_domain_free_fwnode(fn);
+       if (!d) {
+               irq_domain_free_fwnode(fn);
+               kfree(domain_info);
+       }
        return d;
 }
 
index c48be6e..7649da2 100644 (file)
@@ -446,12 +446,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
        trace_vector_activate(irqd->irq, apicd->is_managed,
                              apicd->can_reserve, reserve);
 
-       /* Nothing to do for fixed assigned vectors */
-       if (!apicd->can_reserve && !apicd->is_managed)
-               return 0;
-
        raw_spin_lock_irqsave(&vector_lock, flags);
-       if (reserve || irqd_is_managed_and_shutdown(irqd))
+       if (!apicd->can_reserve && !apicd->is_managed)
+               assign_irq_vector_any_locked(irqd);
+       else if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
@@ -709,7 +707,6 @@ int __init arch_early_irq_init(void)
        x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
                                                   NULL);
        BUG_ON(x86_vector_domain == NULL);
-       irq_domain_free_fwnode(fn);
        irq_set_default_host(x86_vector_domain);
 
        arch_init_msi_domain(x86_vector_domain);
@@ -775,20 +772,10 @@ void lapic_offline(void)
 static int apic_set_affinity(struct irq_data *irqd,
                             const struct cpumask *dest, bool force)
 {
-       struct apic_chip_data *apicd = apic_chip_data(irqd);
        int err;
 
-       /*
-        * Core code can call here for inactive interrupts. For inactive
-        * interrupts which use managed or reservation mode there is no
-        * point in going through the vector assignment right now as the
-        * activation will assign a vector which fits the destination
-        * cpumask. Let the core code store the destination mask and be
-        * done with it.
-        */
-       if (!irqd_is_activated(irqd) &&
-           (apicd->is_managed || apicd->can_reserve))
-               return IRQ_SET_MASK_OK;
+       if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
+               return -EIO;
 
        raw_spin_lock(&vector_lock);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
index 4267925..c5cf336 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
index 043d93c..95c090a 100644 (file)
@@ -347,6 +347,9 @@ out:
        cr4_clear_bits(X86_CR4_UMIP);
 }
 
+/* These bits should not change their value after CPU init is finished. */
+static const unsigned long cr4_pinned_mask =
+       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
@@ -371,20 +374,20 @@ EXPORT_SYMBOL(native_write_cr0);
 
 void native_write_cr4(unsigned long val)
 {
-       unsigned long bits_missing = 0;
+       unsigned long bits_changed = 0;
 
 set_register:
        asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
 
        if (static_branch_likely(&cr_pinning)) {
-               if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
-                       bits_missing = ~val & cr4_pinned_bits;
-                       val |= bits_missing;
+               if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+                       bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+                       val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
                        goto set_register;
                }
-               /* Warn after we've set the missing bits. */
-               WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
-                         bits_missing);
+               /* Warn after we've corrected the changed bits. */
+               WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
+                         bits_changed);
        }
 }
 #if IS_MODULE(CONFIG_LKDTM)
@@ -419,7 +422,7 @@ void cr4_init(void)
        if (boot_cpu_has(X86_FEATURE_PCID))
                cr4 |= X86_CR4_PCIDE;
        if (static_branch_likely(&cr_pinning))
-               cr4 |= cr4_pinned_bits;
+               cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 
        __write_cr4(cr4);
 
@@ -434,10 +437,7 @@ void cr4_init(void)
  */
 static void __init setup_cr_pinning(void)
 {
-       unsigned long mask;
-
-       mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
-       cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+       cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
        static_key_enable(&cr_pinning.key);
 }
 
index fb538fc..9d03369 100644 (file)
@@ -81,8 +81,4 @@ extern void update_srbds_msr(void);
 
 extern u64 x86_read_arch_cap_msr(void);
 
-#ifdef CONFIG_IA32_FEAT_CTL
-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
-#endif
-
 #endif /* ARCH_X86_CPU_H */
index c25a67a..0ab48f1 100644 (file)
@@ -50,6 +50,13 @@ static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 static u64 msr_test_ctrl_cache __ro_after_init;
 
 /*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
+/*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
  * CPU models in which having conflicting memory types still leads to
@@ -1071,7 +1078,8 @@ static void sld_update_msr(bool on)
 
 static void split_lock_init(void)
 {
-       split_lock_verify_msr(sld_state != sld_off);
+       if (cpu_model_supports_sld)
+               split_lock_verify_msr(sld_state != sld_off);
 }
 
 static void split_lock_warn(unsigned long ip)
@@ -1177,5 +1185,6 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
                return;
        }
 
+       cpu_model_supports_sld = true;
        split_lock_setup();
 }
index ce9120c..14e4b4d 100644 (file)
@@ -1083,7 +1083,7 @@ static noinstr bool mce_check_crashing_cpu(void)
 {
        unsigned int cpu = smp_processor_id();
 
-       if (cpu_is_offline(cpu) ||
+       if (arch_cpu_is_offline(cpu) ||
            (crashing_cpu != -1 && crashing_cpu != cpu)) {
                u64 mcgstatus;
 
@@ -1901,6 +1901,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
 
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
+       WARN_ON_ONCE(user_mode(regs));
+
        /*
         * Only required when from kernel mode. See
         * mce_check_crashing_cpu() for details.
@@ -1954,7 +1956,7 @@ DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
 }
 #else
 /* 32bit unified entry point */
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+DEFINE_IDTENTRY_RAW(exc_machine_check)
 {
        unsigned long dr7;
 
index 12f967c..6a9df71 100644 (file)
@@ -981,10 +981,10 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 
                c->x86_cache_max_rmid  = ecx;
                c->x86_cache_occ_scale = ebx;
-               if (c->x86_vendor == X86_VENDOR_INTEL)
-                       c->x86_cache_mbm_width_offset = eax & 0xff;
-               else
-                       c->x86_cache_mbm_width_offset = -1;
+               c->x86_cache_mbm_width_offset = eax & 0xff;
+
+               if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+                       c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
        }
 }
 
index f20a47d..5ffa322 100644 (file)
@@ -37,6 +37,7 @@
 #define MBA_IS_LINEAR                  0x4
 #define MBA_MAX_MBPS                   U32_MAX
 #define MAX_MBA_BW_AMD                 0x800
+#define MBM_CNTR_WIDTH_OFFSET_AMD      20
 
 #define RMID_VAL_ERROR                 BIT_ULL(63)
 #define RMID_VAL_UNAVAIL               BIT_ULL(62)
index 23b4b61..3f844f1 100644 (file)
@@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
        _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
        if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
                _r_cdp = NULL;
+               _d_cdp = NULL;
                ret = -EINVAL;
        }
 
index 300e3fd..ec8064c 100644 (file)
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
-u32 get_umwait_control_msr(void)
-{
-       return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
index df1358b..05fa4ef 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 
 #include "cpu.h"
index 06c8189..15247b9 100644 (file)
@@ -101,6 +101,12 @@ void kernel_fpu_begin(void)
                copy_fpregs_to_fpstate(&current->thread.fpu);
        }
        __cpu_invalidate_fpregs_state();
+
+       if (boot_cpu_has(X86_FEATURE_XMM))
+               ldmxcsr(MXCSR_DEFAULT);
+
+       if (boot_cpu_has(X86_FEATURE_FPU))
+               asm volatile ("fninit");
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
index 8748321..b8aee71 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable_areas.h>
 
+#include <xen/xen.h>
+
 /* This is a multiple of PAGE_SIZE. */
 #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
 
@@ -543,6 +545,28 @@ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
        return bytecount;
 }
 
+static bool allow_16bit_segments(void)
+{
+       if (!IS_ENABLED(CONFIG_X86_16BIT))
+               return false;
+
+#ifdef CONFIG_XEN_PV
+       /*
+        * Xen PV does not implement ESPFIX64, which means that 16-bit
+        * segments will not work correctly.  Until either Xen PV implements
+        * ESPFIX64 and can signal this fact to the guest or unless someone
+        * provides compelling evidence that allowing broken 16-bit segments
+        * is worthwhile, disallow 16-bit segments under Xen PV.
+        */
+       if (xen_pv_domain()) {
+               pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
+               return false;
+       }
+#endif
+
+       return true;
+}
+
 static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 {
        struct mm_struct *mm = current->mm;
@@ -574,7 +598,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
-               if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+               if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
                        error = -EINVAL;
                        goto out;
                }
index 2de365f..d7c5e44 100644 (file)
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-       if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
+       if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
                return;
 
        if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
index 674a7d6..de2138b 100644 (file)
@@ -324,7 +324,8 @@ struct paravirt_patch_template pv_ops = {
        .cpu.swapgs             = native_swapgs,
 
 #ifdef CONFIG_X86_IOPL_IOPERM
-       .cpu.update_io_bitmap   = native_tss_update_io_bitmap,
+       .cpu.invalidate_io_bitmap       = native_tss_invalidate_io_bitmap,
+       .cpu.update_io_bitmap           = native_tss_update_io_bitmap,
 #endif
 
        .cpu.start_context_switch       = paravirt_nop,
index f362ce0..fe67dbd 100644 (file)
@@ -322,20 +322,6 @@ void arch_setup_new_exec(void)
 }
 
 #ifdef CONFIG_X86_IOPL_IOPERM
-static inline void tss_invalidate_io_bitmap(struct tss_struct *tss)
-{
-       /*
-        * Invalidate the I/O bitmap by moving io_bitmap_base outside the
-        * TSS limit so any subsequent I/O access from user space will
-        * trigger a #GP.
-        *
-        * This is correct even when VMEXIT rewrites the TSS limit
-        * to 0x67 as the only requirement is that the base points
-        * outside the limit.
-        */
-       tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
-}
-
 static inline void switch_to_bitmap(unsigned long tifp)
 {
        /*
@@ -346,7 +332,7 @@ static inline void switch_to_bitmap(unsigned long tifp)
         * user mode.
         */
        if (tifp & _TIF_IO_BITMAP)
-               tss_invalidate_io_bitmap(this_cpu_ptr(&cpu_tss_rw));
+               tss_invalidate_io_bitmap();
 }
 
 static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
@@ -380,7 +366,7 @@ void native_tss_update_io_bitmap(void)
        u16 *base = &tss->x86_tss.io_bitmap_base;
 
        if (!test_thread_flag(TIF_IO_BITMAP)) {
-               tss_invalidate_io_bitmap(tss);
+               native_tss_invalidate_io_bitmap();
                return;
        }
 
index f9727b9..b7cb3e0 100644 (file)
@@ -84,17 +84,16 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                local_irq_disable();
 }
 
-int is_valid_bugaddr(unsigned long addr)
+__always_inline int is_valid_bugaddr(unsigned long addr)
 {
-       unsigned short ud;
-
        if (addr < TASK_SIZE_MAX)
                return 0;
 
-       if (get_kernel_nofault(ud, (unsigned short *)addr))
-               return 0;
-
-       return ud == INSN_UD0 || ud == INSN_UD2;
+       /*
+        * We got #UD, if the text isn't readable we'd have gotten
+        * a different exception.
+        */
+       return *(unsigned short *)addr == INSN_UD2;
 }
 
 static nokprobe_inline int
@@ -216,40 +215,45 @@ static inline void handle_invalid_op(struct pt_regs *regs)
                      ILL_ILLOPN, error_get_trap_addr(regs));
 }
 
-DEFINE_IDTENTRY_RAW(exc_invalid_op)
+static noinstr bool handle_bug(struct pt_regs *regs)
 {
-       bool rcu_exit;
+       bool handled = false;
+
+       if (!is_valid_bugaddr(regs->ip))
+               return handled;
 
        /*
-        * Handle BUG/WARN like NMIs instead of like normal idtentries:
-        * if we bugged/warned in a bad RCU context, for example, the last
-        * thing we want is to BUG/WARN again in the idtentry code, ad
-        * infinitum.
+        * All lies, just get the WARN/BUG out.
+        */
+       instrumentation_begin();
+       /*
+        * Since we're emulating a CALL with exceptions, restore the interrupt
+        * state to what it was at the exception site.
         */
-       if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
-               enum bug_trap_type type;
+       if (regs->flags & X86_EFLAGS_IF)
+               raw_local_irq_enable();
+       if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+               regs->ip += LEN_UD2;
+               handled = true;
+       }
+       if (regs->flags & X86_EFLAGS_IF)
+               raw_local_irq_disable();
+       instrumentation_end();
 
-               nmi_enter();
-               instrumentation_begin();
-               trace_hardirqs_off_finish();
-               type = report_bug(regs->ip, regs);
-               if (regs->flags & X86_EFLAGS_IF)
-                       trace_hardirqs_on_prepare();
-               instrumentation_end();
-               nmi_exit();
+       return handled;
+}
 
-               if (type == BUG_TRAP_TYPE_WARN) {
-                       /* Skip the ud2. */
-                       regs->ip += LEN_UD2;
-                       return;
-               }
+DEFINE_IDTENTRY_RAW(exc_invalid_op)
+{
+       bool rcu_exit;
 
-               /*
-                * Else, if this was a BUG and report_bug returns or if this
-                * was just a normal #UD, we want to continue onward and
-                * crash.
-                */
-       }
+       /*
+        * We use UD2 as a short encoding for 'CALL __WARN', as such
+        * handle it before exception entry to avoid recursive WARN
+        * in case exception entry is the one triggering WARNs.
+        */
+       if (!user_mode(regs) && handle_bug(regs))
+               return;
 
        rcu_exit = idtentry_enter_cond_rcu(regs);
        instrumentation_begin();
@@ -299,6 +303,8 @@ DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
 
        do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
                error_code, BUS_ADRALN, NULL);
+
+       local_irq_disable();
 }
 
 #ifdef CONFIG_VMAP_STACK
@@ -691,13 +697,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
                (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the temporary storage. */
-       memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+       __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 
        /* Copy the remainder of the stack from the current stack. */
-       memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+       __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 
        /* Update the entry stack */
-       memcpy(new_stack, &tmp, sizeof(tmp));
+       __memcpy(new_stack, &tmp, sizeof(tmp));
 
        BUG_ON(!user_mode(&new_stack->regs));
        return new_stack;
@@ -866,6 +872,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
        trace_hardirqs_off_finish();
 
        /*
+        * If something gets miswired and we end up here for a user mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(user_mode(regs));
+
+       /*
         * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
         * watchpoint at the same time then that will still be handled.
         */
@@ -883,6 +895,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 static __always_inline void exc_debug_user(struct pt_regs *regs,
                                           unsigned long dr6)
 {
+       /*
+        * If something gets miswired and we end up here for a kernel mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(!user_mode(regs));
+
        idtentry_enter_user(regs);
        instrumentation_begin();
 
@@ -913,7 +931,7 @@ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 }
 #else
 /* 32 bit does not have separate entry points. */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+DEFINE_IDTENTRY_RAW(exc_debug)
 {
        unsigned long dr6, dr7;
 
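The handle_bug() rework above relies on WARN() being encoded as a bare ud2 instruction whose trap handler reports the warning and then steps the instruction pointer past it. A user-space toy illustrating the same idea is sketched below (x86-64 Linux/glibc only, fprintf in a signal handler is not async-signal-safe, and this is not the kernel's actual mechanism, just the shape of it).

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

#define LEN_UD2 2

static void ud_handler(int sig, siginfo_t *info, void *uc_)
{
	ucontext_t *uc = uc_;

	(void)sig; (void)info;
	fprintf(stderr, "WARNING at ip=%#llx\n",
		(unsigned long long)uc->uc_mcontext.gregs[REG_RIP]);
	uc->uc_mcontext.gregs[REG_RIP] += LEN_UD2;	/* skip the ud2, like a WARN */
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = ud_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGILL, &sa, NULL);

	asm volatile("ud2");			/* the "WARN" site */
	puts("execution continued past the warning");
	return 0;
}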
index ff2d0e9..cfe83d4 100644 (file)
@@ -7,7 +7,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS                              \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
 
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
index 34a7e05..5bf72fc 100644 (file)
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
        kvfree(map);
 }
 
+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * the apic_map_lock held.
+ */
+enum {
+       CLEAN,
+       UPDATE_IN_PROGRESS,
+       DIRTY
+};
+
 void kvm_recalculate_apic_map(struct kvm *kvm)
 {
        struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */
 
-       if (!kvm->arch.apic_map_dirty) {
-               /*
-                * Read kvm->arch.apic_map_dirty before
-                * kvm->arch.apic_map
-                */
-               smp_rmb();
+       /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
+       if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;
-       }
 
        mutex_lock(&kvm->arch.apic_map_lock);
-       if (!kvm->arch.apic_map_dirty) {
+       /*
+        * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+        * (if clean) or the APIC registers (if dirty).
+        */
+       if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+                                  DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
@@ -256,11 +268,11 @@ out:
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
-        * Write kvm->arch.apic_map before
-        * clearing apic->apic_map_dirty
+        * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+        * If another update has come in, leave it DIRTY.
         */
-       smp_wmb();
-       kvm->arch.apic_map_dirty = false;
+       atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+                              UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);
 
        if (old)
@@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
                else
                        static_key_slow_inc(&apic_sw_disabled.key);
 
-               apic->vcpu->kvm->arch.apic_map_dirty = true;
+               atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
        kvm_lapic_set_reg(apic, APIC_LDR, id);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 
        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-                       apic->vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                } else
                        ret = 1;
                break;
@@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                        static_key_slow_dec_deferred(&apic_hw_disabled);
                } else {
                        static_key_slow_inc(&apic_hw_disabled.key);
-                       vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                }
        }
 
@@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        if (!apic)
                return;
 
-       vcpu->kvm->arch.apic_map_dirty = false;
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
 
@@ -2567,6 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        }
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        kvm_recalculate_apic_map(vcpu->kvm);
        kvm_apic_set_version(vcpu);
 
index 0ad06bf..444bb9c 100644 (file)
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
index fdd05c2..6d6a0ae 100644 (file)
@@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  * Emulate arch specific page modification logging for the
  * nested hypervisor
  */
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
 {
        if (kvm_x86_ops.write_log_dirty)
-               return kvm_x86_ops.write_log_dirty(vcpu);
+               return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
 
        return 0;
 }
@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                        nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-                       nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+                       gbpages_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
index a6d484e..bd70ece 100644 (file)
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
-                                            int write_fault)
+                                            gpa_t addr, int write_fault)
 {
        unsigned level, index;
        pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 #if PTTYPE == PTTYPE_EPT
-                       if (kvm_arch_write_log_dirty(vcpu))
+                       if (kvm_arch_write_log_dirty(vcpu, addr))
                                return -EINVAL;
 #endif
                        pte |= PT_GUEST_DIRTY_MASK;
@@ -360,7 +360,6 @@ retry_walk:
        ++walker->level;
 
        do {
-               gfn_t real_gfn;
                unsigned long host_addr;
 
                pt_access = pte_access;
@@ -375,7 +374,7 @@ retry_walk:
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
 
-               real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
+               real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              nested_access,
                                              &walker->fault);
 
@@ -389,12 +388,10 @@ retry_walk:
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
-               if (unlikely(real_gfn == UNMAPPED_GVA))
+               if (unlikely(real_gpa == UNMAPPED_GVA))
                        return 0;
 
-               real_gfn = gpa_to_gfn(real_gfn);
-
-               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
+               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
@@ -457,7 +454,8 @@ retry_walk:
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
 
        if (unlikely(!accessed_dirty)) {
-               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+                                                       addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
index 8ccfa41..c0da4dd 100644 (file)
@@ -3344,7 +3344,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        fastpath_t exit_fastpath;
        struct vcpu_svm *svm = to_svm(vcpu);
index d1af20b..d4a4cec 100644 (file)
@@ -4109,7 +4109,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
         * CR0_GUEST_HOST_MASK is already set in the original vmcs01
         * (KVM doesn't change it);
         */
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
@@ -4259,7 +4259,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         */
        vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
@@ -6176,6 +6176,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                        goto error_guest_mode;
        }
 
+       vmx->nested.has_preemption_timer_deadline = false;
        if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
                vmx->nested.has_preemption_timer_deadline = true;
                vmx->nested.preemption_timer_deadline =
index 5c0ff80..7a3675f 100644 (file)
@@ -72,11 +72,24 @@ struct loaded_vmcs {
        struct vmcs_controls_shadow controls_shadow;
 };
 
+static inline bool is_intr_type(u32 intr_info, u32 type)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
+}
+
+static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
+                        INTR_INFO_VECTOR_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
+}
+
 static inline bool is_exception_n(u32 intr_info, u8 vector)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+       return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
 }
 
 static inline bool is_debug(u32 intr_info)
@@ -106,28 +119,23 @@ static inline bool is_gp_fault(u32 intr_info)
 
 static inline bool is_machine_check(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+       return is_exception_n(intr_info, MC_VECTOR);
 }
 
 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
 }
 
 static inline bool is_nmi(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
 }
 
 static inline bool is_external_intr(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
-               == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR);
+       return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
 }
 
 enum vmcs_field_width {
index 36c7717..13745f2 100644 (file)
@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON                           \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS                                     \
-       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-       if (enable_ept)
-               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+       if (!enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
        if (is_guest_mode(&vmx->vcpu))
                vmx->vcpu.arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-       vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
 
@@ -6606,23 +6603,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
-       u32 host_umwait_control;
-
-       if (!vmx_has_waitpkg(vmx))
-               return;
-
-       host_umwait_control = get_umwait_control_msr();
-
-       if (vmx->msr_ia32_umwait_control != host_umwait_control)
-               add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
-                       vmx->msr_ia32_umwait_control,
-                       host_umwait_control, false);
-       else
-               clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6728,9 +6708,7 @@ reenter_guest:
 
        pt_guest_enter(vmx);
 
-       if (vcpu_to_pmu(vcpu)->version)
-               atomic_switch_perf_msrs(vmx);
-       atomic_switch_umwait_control_msr(vmx);
+       atomic_switch_perf_msrs(vmx);
 
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
@@ -7501,11 +7479,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
        kvm_flush_pml_buffers(kvm);
 }
 
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa, dst;
+       gpa_t dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7524,7 +7502,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                        return 1;
                }
 
-               gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               gpa &= ~0xFFFull;
                dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
                if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
index 8a83b5e..639798e 100644 (file)
@@ -288,8 +288,6 @@ struct vcpu_vmx {
 
        u64 current_tsc_ratio;
 
-       u32 host_pkru;
-
        unsigned long host_debugctlmsr;
 
        /*
index 00c88c2..88c593f 100644 (file)
@@ -975,6 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
+               if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+                       return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -2693,6 +2695,9 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        if (data & 0x30)
                return 1;
 
+       if (!lapic_in_kernel(vcpu))
+               return 1;
+
        vcpu->arch.apf.msr_en_val = data;
 
        if (!kvm_pv_async_pf_enabled(vcpu)) {
@@ -2856,7 +2861,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_mtrr_set_msr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                return kvm_set_apic_base(vcpu, msr_info);
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_TSCDEADLINE:
                kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3196,7 +3201,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_APICBASE:
                msr_info->data = kvm_get_apic_base(vcpu);
                break;
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
        case MSR_IA32_TSCDEADLINE:
                msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
@@ -4603,7 +4608,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
                user_tsc_khz = (u32)arg;
 
-               if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+               if (kvm_has_tsc_control &&
+                   user_tsc_khz >= kvm_max_guest_tsc_khz)
                        goto out;
 
                if (user_tsc_khz == 0)
index 56b243b..bbcc05b 100644 (file)
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
        retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
index fff28c6..b0dfac3 100644 (file)
@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
+               "       .align 16\n"
                "0:     movq $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
index 3b2b581..40526dd 100644 (file)
@@ -209,7 +209,7 @@ sqrt_stage_2_finish:
 
 #ifdef PARANOID
 /* It should be possible to get here only if the arg is ffff....ffff */
-       cmp     $0xffffffff,FPU_fsqrt_arg_1
+       cmpl    $0xffffffff,FPU_fsqrt_arg_1
        jnz     sqrt_stage_2_error
 #endif /* PARANOID */
 
index fc13cbb..abb6075 100644 (file)
@@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
                goto out;
 
        uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
-       irq_domain_free_fwnode(fn);
        if (uv_domain)
                uv_domain->parent = x86_vector_domain;
+       else
+               irq_domain_free_fwnode(fn);
 out:
        mutex_unlock(&uv_lock);
 
index 7c65102..db1378c 100644 (file)
@@ -193,6 +193,8 @@ static void fix_processor_context(void)
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
+       struct cpuinfo_x86 *c;
+
        if (ctxt->misc_enable_saved)
                wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
        /*
@@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        mtrr_bp_restore();
        perf_restore_debug_store();
        msr_restore_context(ctxt);
+
+       c = &cpu_data(smp_processor_id());
+       if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+               init_ia32_feat_ctl(c);
 }
 
 /* Needed by apm.c */
index acc49fa..c46b9f2 100644 (file)
@@ -598,6 +598,26 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
 }
 
 #ifdef CONFIG_X86_64
+void noist_exc_debug(struct pt_regs *regs);
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
+{
+       /* On Xen PV, NMI doesn't use IST.  The C part is the same as native. */
+       exc_nmi(regs);
+}
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+{
+       /*
+        * There's no IST on Xen PV, but we still need to dispatch
+        * to the correct handler.
+        */
+       if (user_mode(regs))
+               noist_exc_debug(regs);
+       else
+               exc_debug(regs);
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
@@ -609,18 +629,18 @@ struct trap_array_entry {
        .xen            = xen_asm_##func,               \
        .ist_okay       = ist_ok }
 
-#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) {      \
+#define TRAP_ENTRY_REDIR(func, ist_ok) {               \
        .orig           = asm_##func,                   \
-       .xen            = xen_asm_##xenfunc,            \
+       .xen            = xen_asm_xenpv_##func,         \
        .ist_okay       = ist_ok }
 
 static struct trap_array_entry trap_array[] = {
-       TRAP_ENTRY_REDIR(exc_debug, exc_xendebug,       true  ),
+       TRAP_ENTRY_REDIR(exc_debug,                     true  ),
        TRAP_ENTRY(exc_double_fault,                    true  ),
 #ifdef CONFIG_X86_MCE
        TRAP_ENTRY(exc_machine_check,                   true  ),
 #endif
-       TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi,           true  ),
+       TRAP_ENTRY_REDIR(exc_nmi,                       true  ),
        TRAP_ENTRY(exc_int3,                            false ),
        TRAP_ENTRY(exc_overflow,                        false ),
 #ifdef CONFIG_IA32_EMULATION
@@ -850,6 +870,17 @@ static void xen_load_sp0(unsigned long sp0)
 }
 
 #ifdef CONFIG_X86_IOPL_IOPERM
+static void xen_invalidate_io_bitmap(void)
+{
+       struct physdev_set_iobitmap iobitmap = {
+               .bitmap = 0,
+               .nr_ports = 0,
+       };
+
+       native_tss_invalidate_io_bitmap();
+       HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
+}
+
 static void xen_update_io_bitmap(void)
 {
        struct physdev_set_iobitmap iobitmap;
@@ -1079,6 +1110,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .load_sp0 = xen_load_sp0,
 
 #ifdef CONFIG_X86_IOPL_IOPERM
+       .invalidate_io_bitmap = xen_invalidate_io_bitmap,
        .update_io_bitmap = xen_update_io_bitmap,
 #endif
        .io_delay = xen_io_delay,
index 5d252aa..aab1d99 100644 (file)
@@ -29,10 +29,9 @@ _ASM_NOKPROBE(xen_\name)
 .endm
 
 xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_exc_debug
-xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_xenpv_exc_debug
 xen_pv_trap asm_exc_int3
-xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_xenpv_exc_nmi
 xen_pv_trap asm_exc_overflow
 xen_pv_trap asm_exc_bounds
 xen_pv_trap asm_exc_invalid_op
@@ -161,10 +160,22 @@ SYM_FUNC_END(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 SYM_FUNC_START(xen_sysenter_target)
-       mov 0*8(%rsp), %rcx
-       mov 1*8(%rsp), %r11
-       mov 5*8(%rsp), %rsp
-       jmp entry_SYSENTER_compat
+       /*
+        * NB: Xen is polite and clears TF from EFLAGS for us.  This means
+        * that we don't need to guard against single step exceptions here.
+        */
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER32_DS, 4*8(%rsp)
+       movq $__USER32_CS, 1*8(%rsp)
+
+       jmp entry_SYSENTER_compat_after_hwframe
 SYM_FUNC_END(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
index 9bae79f..99fcd63 100644 (file)
@@ -362,9 +362,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
        struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
        unsigned i;
 
-       for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
-            i < XCHAL_NUM_PERF_COUNTERS;
-            i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+       for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
                uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
                struct perf_event *event = ev->event[i];
                struct hw_perf_event *hwc = &event->hw;
index d9204dc..be2c78f 100644 (file)
@@ -724,7 +724,8 @@ c_start(struct seq_file *f, loff_t *pos)
 static void *
 c_next(struct seq_file *f, void *v, loff_t *pos)
 {
-       return NULL;
+       ++*pos;
+       return c_start(f, pos);
 }
 
 static void
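The c_next() fix above matters because a seq_file ->next() callback must both advance *pos and return the element for the new position; returning NULL unconditionally truncates the output after the first record. A tiny user-space imitation of that contract is sketched below (hypothetical names, long long standing in for loff_t).

#include <stdio.h>

#define NR_ITEMS 4
static const char *items[NR_ITEMS] = { "cpu0", "cpu1", "cpu2", "cpu3" };

/* ->start(): return the element at *pos, or NULL once past the end. */
static void *c_start_ex(long long *pos)
{
	return (*pos < NR_ITEMS) ? (void *)items[*pos] : NULL;
}

/* ->next(): advance *pos first, then return the element for the new position. */
static void *c_next_ex(void *v, long long *pos)
{
	(void)v;
	++*pos;
	return c_start_ex(pos);
}

int main(void)
{
	long long pos = 0;
	void *v;

	for (v = c_start_ex(&pos); v; v = c_next_ex(v, &pos))
		puts(v);
	return 0;
}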
index 4092555..24cf697 100644 (file)
@@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
 }
 EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
 
-unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
 {
        BUG();
 }
 EXPORT_SYMBOL(__sync_fetch_and_and_4);
 
-unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
 {
        BUG();
 }
index 23632a3..9ffd7e2 100644 (file)
@@ -24,6 +24,19 @@ void blk_flush_integrity(void)
        flush_workqueue(kintegrityd_wq);
 }
 
+static void __bio_integrity_free(struct bio_set *bs,
+                                struct bio_integrity_payload *bip)
+{
+       if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
+               if (bip->bip_vec)
+                       bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
+                                 bip->bip_slab);
+               mempool_free(bip, &bs->bio_integrity_pool);
+       } else {
+               kfree(bip);
+       }
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
@@ -78,7 +91,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 
        return bip;
 err:
-       mempool_free(bip, &bs->bio_integrity_pool);
+       __bio_integrity_free(bs, bip);
        return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
@@ -99,14 +112,7 @@ void bio_integrity_free(struct bio *bio)
                kfree(page_address(bip->bip_vec->bv_page) +
                      bip->bip_vec->bv_offset);
 
-       if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
-               bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
-
-               mempool_free(bip, &bs->bio_integrity_pool);
-       } else {
-               kfree(bip);
-       }
-
+       __bio_integrity_free(bs, bip);
        bio->bi_integrity = NULL;
        bio->bi_opf &= ~REQ_INTEGRITY;
 }
index 15df3a3..e0b2bc1 100644 (file)
@@ -125,6 +125,9 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
+       QUEUE_FLAG_NAME(PCI_P2PDMA),
+       QUEUE_FLAG_NAME(ZONE_RESETALL),
+       QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
 };
 #undef QUEUE_FLAG_NAME
 
index a9aa6d1..4e0d173 100644 (file)
@@ -828,10 +828,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
                               void *priv, bool reserved)
 {
        /*
-        * If we find a request that is inflight and the queue matches,
+        * If we find a request that isn't idle and the queue matches,
         * we know the queue is busy. Return false to stop the iteration.
         */
-       if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+       if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
                bool *busy = priv;
 
                *busy = true;
index c2ef41b..35abcb1 100644 (file)
@@ -374,8 +374,7 @@ void blk_ksm_destroy(struct blk_keyslot_manager *ksm)
        if (!ksm)
                return;
        kvfree(ksm->slot_hashtable);
-       memzero_explicit(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
-       kvfree(ksm->slots);
+       kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
        memzero_explicit(ksm, sizeof(*ksm));
 }
 EXPORT_SYMBOL_GPL(blk_ksm_destroy);
index b1cd353..28fc323 100644 (file)
@@ -128,21 +128,15 @@ EXPORT_SYMBOL_GPL(af_alg_release);
 void af_alg_release_parent(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
-       unsigned int nokey = ask->nokey_refcnt;
-       bool last = nokey && !ask->refcnt;
+       unsigned int nokey = atomic_read(&ask->nokey_refcnt);
 
        sk = ask->parent;
        ask = alg_sk(sk);
 
-       local_bh_disable();
-       bh_lock_sock(sk);
-       ask->nokey_refcnt -= nokey;
-       if (!last)
-               last = !--ask->refcnt;
-       bh_unlock_sock(sk);
-       local_bh_enable();
+       if (nokey)
+               atomic_dec(&ask->nokey_refcnt);
 
-       if (last)
+       if (atomic_dec_and_test(&ask->refcnt))
                sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_release_parent);
@@ -187,7 +181,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = -EBUSY;
        lock_sock(sk);
-       if (ask->refcnt | ask->nokey_refcnt)
+       if (atomic_read(&ask->refcnt))
                goto unlock;
 
        swap(ask->type, type);
@@ -236,7 +230,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
        int err = -EBUSY;
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
                goto unlock;
 
        type = ask->type;
@@ -301,12 +295,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
        if (err)
                goto unlock;
 
-       if (nokey || !ask->refcnt++)
+       if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
                sock_hold(sk);
-       ask->nokey_refcnt += nokey;
+       if (nokey) {
+               atomic_inc(&ask->nokey_refcnt);
+               atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
+       }
        alg_sk(sk2)->parent = sk;
        alg_sk(sk2)->type = type;
-       alg_sk(sk2)->nokey_refcnt = nokey;
 
        newsock->ops = type->ops;
        newsock->state = SS_CONNECTED;
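The af_alg change above replaces a pair of bh-locked counters with plain atomics, so the "last put frees the socket" decision becomes a single dec-and-test. A generic sketch of that idiom with C11 atomics is given below; it is not the crypto API itself, just the refcounting pattern.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_uint refcnt;
	/* ... payload ... */
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->refcnt, 1);
	return o;
}

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add_explicit(&o->refcnt, 1, memory_order_relaxed);
	return o;
}

static void obj_put(struct obj *o)
{
	/* acq_rel so prior writes are visible to whichever caller frees it. */
	if (atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_acq_rel) == 1)
		free(o);	/* this was the last reference */
}

int main(void)
{
	struct obj *o = obj_new();

	if (!o)
		return 1;
	obj_get(o);
	obj_put(o);
	obj_put(o);	/* drops the last reference and frees */
	return 0;
}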
index eb1910b..0ae000a 100644 (file)
@@ -384,7 +384,7 @@ static int aead_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -396,11 +396,8 @@ static int aead_check_key(struct socket *sock)
        if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index da1ffa4..e71727c 100644 (file)
@@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock)
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index 4c3bdff..ec5567c 100644 (file)
@@ -211,7 +211,7 @@ static int skcipher_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -223,11 +223,8 @@ static int skcipher_check_key(struct socket *sock)
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
index d7f43d4..e5fae4e 100644 (file)
@@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
+       ret = -ENOMEM;
        key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
                      GFP_KERNEL);
        if (!key)
index ece8c1a..88c8af4 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/configfs.h>
 #include <linux/acpi.h>
+#include <linux/security.h>
 
 #include "acpica/accommon.h"
 #include "acpica/actables.h"
@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
 {
        const struct acpi_table_header *header = data;
        struct acpi_table *table;
-       int ret;
+       int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+       if (ret)
+               return ret;
 
        table = container_of(cfg, struct acpi_table, cfg);
 
index 5fab7e3..92b996a 100644 (file)
@@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = {
        {"INT3407", 0},
        {"INT3532", 0},
        {"INTC1047", 0},
+       {"INTC1050", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
index 873e039..6287338 100644 (file)
@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
        {"PNP0C0B", 0},
-       {"INT1044", 0},
        {"INT3404", 0},
+       {"INTC1044", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
index 3a89909..76c668c 100644 (file)
@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
 }
 
 static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
                  char *buf)
 {
        return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 }
 
-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
        __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
 
 static ssize_t hotplug_enabled_show(struct kobject *kobj,
index e47c8a4..f50c5f1 100644 (file)
@@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 
 static void binder_free_proc(struct binder_proc *proc)
 {
+       struct binder_device *device;
+
        BUG_ON(!list_empty(&proc->todo));
        BUG_ON(!list_empty(&proc->delivered_death));
+       device = container_of(proc->context, struct binder_device, context);
+       if (refcount_dec_and_test(&device->ref)) {
+               kfree(proc->context->name);
+               kfree(device);
+       }
        binder_alloc_deferred_release(&proc->alloc);
        put_task_struct(proc->tsk);
        binder_stats_deleted(BINDER_STAT_PROC);
@@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs)
 static void binder_deferred_release(struct binder_proc *proc)
 {
        struct binder_context *context = proc->context;
-       struct binder_device *device;
        struct rb_node *n;
        int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
@@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc)
                context->binder_context_mgr_node = NULL;
        }
        mutex_unlock(&context->context_mgr_node_lock);
-       device = container_of(proc->context, struct binder_device, context);
-       if (refcount_dec_and_test(&device->ref)) {
-               kfree(context->name);
-               kfree(device);
-       }
-       proc->context = NULL;
        binder_inner_proc_lock(proc);
        /*
         * Make sure proc stays alive after we
index 95c22c0..40fb069 100644 (file)
@@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
 extern int devres_release_all(struct device *dev);
 extern void device_block_probing(void);
 extern void device_unblock_probing(void);
-extern void driver_deferred_probe_force_trigger(void);
 
 /* /sys/devices directory */
 extern struct kset *devices_kset;
index 67d39a9..05d414e 100644 (file)
@@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock);
 static LIST_HEAD(deferred_sync);
 static unsigned int defer_sync_state_count = 1;
 static unsigned int defer_fw_devlink_count;
+static LIST_HEAD(deferred_fw_devlink);
 static DEFINE_MUTEX(defer_fw_devlink_lock);
 static bool fw_devlink_is_permissive(void);
 
@@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev,
         */
        dev->state_synced = true;
 
-       if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+       if (WARN_ON(!list_empty(&dev->links.defer_hook)))
                return;
 
        get_device(dev);
-       list_add_tail(&dev->links.defer_sync, list);
+       list_add_tail(&dev->links.defer_hook, list);
 }
 
 /**
@@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list,
 {
        struct device *dev, *tmp;
 
-       list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
-               list_del_init(&dev->links.defer_sync);
+       list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
+               list_del_init(&dev->links.defer_hook);
 
                if (dev != dont_lock_dev)
                        device_lock(dev);
@@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void)
        if (defer_sync_state_count)
                goto out;
 
-       list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+       list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
                /*
                 * Delete from deferred_sync list before queuing it to
-                * sync_list because defer_sync is used for both lists.
+                * sync_list because defer_hook is used for both lists.
                 */
-               list_del_init(&dev->links.defer_sync);
+               list_del_init(&dev->links.defer_hook);
                __device_links_queue_sync_state(dev, &sync_list);
        }
 out:
@@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall);
 
 static void __device_links_supplier_defer_sync(struct device *sup)
 {
-       if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
-               list_add_tail(&sup->links.defer_sync, &deferred_sync);
+       if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
+               list_add_tail(&sup->links.defer_hook, &deferred_sync);
 }
 
 static void device_link_drop_managed(struct device_link *link)
@@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev)
                WRITE_ONCE(link->status, DL_STATE_DORMANT);
        }
 
-       list_del_init(&dev->links.defer_sync);
+       list_del_init(&dev->links.defer_hook);
        __device_links_no_driver(dev);
 
        device_links_write_unlock();
@@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev)
                        fw_ret = -EAGAIN;
        } else {
                fw_ret = -ENODEV;
+               /*
+                * defer_hook is not used to add device to deferred_sync list
+                * until device is bound. Since deferred fw devlink also blocks
+                * probing, same list hook can be used for deferred_fw_devlink.
+                */
+               list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
        }
 
        if (fw_ret == -ENODEV)
@@ -1312,6 +1319,9 @@ void fw_devlink_pause(void)
  */
 void fw_devlink_resume(void)
 {
+       struct device *dev, *tmp;
+       LIST_HEAD(probe_list);
+
        mutex_lock(&defer_fw_devlink_lock);
        if (!defer_fw_devlink_count) {
                WARN(true, "Unmatched fw_devlink pause/resume!");
@@ -1323,9 +1333,19 @@ void fw_devlink_resume(void)
                goto out;
 
        device_link_add_missing_supplier_links();
-       driver_deferred_probe_force_trigger();
+       list_splice_tail_init(&deferred_fw_devlink, &probe_list);
 out:
        mutex_unlock(&defer_fw_devlink_lock);
+
+       /*
+        * bus_probe_device() can cause new devices to get added and they'll
+        * try to grab defer_fw_devlink_lock. So, this needs to be done outside
+        * the defer_fw_devlink_lock.
+        */
+       list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
+               list_del_init(&dev->links.defer_hook);
+               bus_probe_device(dev);
+       }
 }
 /* Device links support end. */
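The fw_devlink_resume() hunk above splices the deferred devices onto a private list while holding defer_fw_devlink_lock and only probes them after the lock is dropped, because probing can re-enter code that takes the same lock. A generic sketch of that splice-then-process pattern follows (hypothetical types, a plain singly linked list instead of list_head).

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
};

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *deferred;		/* shared, protected by defer_lock */

/* Stand-in for the per-entry work; may itself need defer_lock. */
static void process(struct entry *e)
{
	printf("processing %p\n", (void *)e);
}

static void resume_deferred(void)
{
	struct entry *local, *e;

	pthread_mutex_lock(&defer_lock);
	local = deferred;		/* splice the whole list out ... */
	deferred = NULL;
	pthread_mutex_unlock(&defer_lock);	/* ... and drop the lock */

	while ((e = local) != NULL) {	/* nobody else can see 'local' */
		local = e->next;
		process(e);		/* safe even if this re-takes the lock */
	}
}

int main(void)
{
	static struct entry a, b;

	a.next = &b;
	deferred = &a;
	resume_deferred();
	return 0;
}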
 
@@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev)
        INIT_LIST_HEAD(&dev->links.consumers);
        INIT_LIST_HEAD(&dev->links.suppliers);
        INIT_LIST_HEAD(&dev->links.needs_suppliers);
-       INIT_LIST_HEAD(&dev->links.defer_sync);
+       INIT_LIST_HEAD(&dev->links.defer_hook);
        dev->links.status = DL_DEV_NO_DRIVER;
 }
 EXPORT_SYMBOL_GPL(device_initialize);
index 9a1d940..48ca81c 100644 (file)
@@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void)
        if (!driver_deferred_probe_enable)
                return;
 
-       driver_deferred_probe_force_trigger();
-}
-
-void driver_deferred_probe_force_trigger(void)
-{
        /*
         * A successful probe means that all the devices in the pending list
         * should be triggered to be reprobed.  Move all the deferred devices
index 977d27b..a97f33d 100644 (file)
@@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = {
        .notifier_call = pm_trace_notify,
 };
 
-static int early_resume_init(void)
+static int __init early_resume_init(void)
 {
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
 }
 
-static int late_resume_init(void)
+static int __init late_resume_init(void)
 {
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
index 0fd6f97..1d1d26b 100644 (file)
@@ -4,7 +4,7 @@
 # subsystems should select the appropriate symbols.
 
 config REGMAP
-       default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C)
+       default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C)
        select IRQ_DOMAIN if REGMAP_IRQ
        bool
 
index 089e5dc..f58baff 100644 (file)
@@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 {
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_only);
-       ssize_t result;
-       bool was_enabled, require_sync = false;
+       bool new_val, require_sync = false;
        int err;
 
-       map->lock(map->lock_arg);
+       err = kstrtobool_from_user(user_buf, count, &new_val);
+       /* Ignore malformed data like debugfs_write_file_bool() */
+       if (err)
+               return count;
 
-       was_enabled = map->cache_only;
+       err = debugfs_file_get(file->f_path.dentry);
+       if (err)
+               return err;
 
-       result = debugfs_write_file_bool(file, user_buf, count, ppos);
-       if (result < 0) {
-               map->unlock(map->lock_arg);
-               return result;
-       }
+       map->lock(map->lock_arg);
 
-       if (map->cache_only && !was_enabled) {
+       if (new_val && !map->cache_only) {
                dev_warn(map->dev, "debugfs cache_only=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-       } else if (!map->cache_only && was_enabled) {
+       } else if (!new_val && map->cache_only) {
                dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
                require_sync = true;
        }
+       map->cache_only = new_val;
 
        map->unlock(map->lock_arg);
+       debugfs_file_put(file->f_path.dentry);
 
        if (require_sync) {
                err = regcache_sync(map);
@@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
                        dev_err(map->dev, "Failed to sync cache %d\n", err);
        }
 
-       return result;
+       return count;
 }
 
 static const struct file_operations regmap_cache_only_fops = {
@@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
 {
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_bypass);
-       ssize_t result;
-       bool was_enabled;
+       bool new_val;
+       int err;
 
-       map->lock(map->lock_arg);
+       err = kstrtobool_from_user(user_buf, count, &new_val);
+       /* Ignore malformed data like debugfs_write_file_bool() */
+       if (err)
+               return count;
 
-       was_enabled = map->cache_bypass;
+       err = debugfs_file_get(file->f_path.dentry);
+       if (err)
+               return err;
 
-       result = debugfs_write_file_bool(file, user_buf, count, ppos);
-       if (result < 0)
-               goto out;
+       map->lock(map->lock_arg);
 
-       if (map->cache_bypass && !was_enabled) {
+       if (new_val && !map->cache_bypass) {
                dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-       } else if (!map->cache_bypass && was_enabled) {
+       } else if (!new_val && map->cache_bypass) {
                dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
        }
+       map->cache_bypass = new_val;
 
-out:
        map->unlock(map->lock_arg);
+       debugfs_file_put(file->f_path.dentry);
 
-       return result;
+       return count;
 }
 
 static const struct file_operations regmap_cache_bypass_fops = {
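
Both regmap-debugfs handlers above now follow the same shape: parse the bool before taking any lock, pin the debugfs file, and hold the regmap lock only around the state change. A condensed sketch of that pattern (simplified; in the real code the map is recovered with container_of()):

    static ssize_t cache_flag_write(struct file *file, const char __user *ubuf,
                                    size_t count, loff_t *ppos)
    {
            struct regmap *map = file->private_data;    /* simplified for the sketch */
            bool new_val;
            int err;

            err = kstrtobool_from_user(ubuf, count, &new_val);
            if (err)
                    return count;           /* malformed input is silently ignored */

            err = debugfs_file_get(file->f_path.dentry);
            if (err)
                    return err;

            map->lock(map->lock_arg);
            map->cache_only = new_val;      /* or cache_bypass */
            map->unlock(map->lock_arg);

            debugfs_file_put(file->f_path.dentry);
            return count;
    }
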
index c472f62..795a62a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be16 *b = buf;
-
-       b[0] = cpu_to_be16(val << shift);
+       put_unaligned_be16(val << shift, buf);
 }
 
 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le16 *b = buf;
-
-       b[0] = cpu_to_le16(val << shift);
+       put_unaligned_le16(val << shift, buf);
 }
 
 static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u16 *)buf = val << shift;
+       u16 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be32 *b = buf;
-
-       b[0] = cpu_to_be32(val << shift);
+       put_unaligned_be32(val << shift, buf);
 }
 
 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le32 *b = buf;
-
-       b[0] = cpu_to_le32(val << shift);
+       put_unaligned_le32(val << shift, buf);
 }
 
 static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u32 *)buf = val << shift;
+       u32 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 #ifdef CONFIG_64BIT
 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be64 *b = buf;
-
-       b[0] = cpu_to_be64((u64)val << shift);
+       put_unaligned_be64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le64 *b = buf;
-
-       b[0] = cpu_to_le64((u64)val << shift);
+       put_unaligned_le64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u64 *)buf = (u64)val << shift;
+       u64 v = (u64) val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 #endif
 
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
 
 static unsigned int regmap_parse_16_be(const void *buf)
 {
-       const __be16 *b = buf;
-
-       return be16_to_cpu(b[0]);
+       return get_unaligned_be16(buf);
 }
 
 static unsigned int regmap_parse_16_le(const void *buf)
 {
-       const __le16 *b = buf;
-
-       return le16_to_cpu(b[0]);
+       return get_unaligned_le16(buf);
 }
 
 static void regmap_parse_16_be_inplace(void *buf)
 {
-       __be16 *b = buf;
+       u16 v = get_unaligned_be16(buf);
 
-       b[0] = be16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_16_le_inplace(void *buf)
 {
-       __le16 *b = buf;
+       u16 v = get_unaligned_le16(buf);
 
-       b[0] = le16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_16_native(const void *buf)
 {
-       return *(u16 *)buf;
+       u16 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
 
 static unsigned int regmap_parse_32_be(const void *buf)
 {
-       const __be32 *b = buf;
-
-       return be32_to_cpu(b[0]);
+       return get_unaligned_be32(buf);
 }
 
 static unsigned int regmap_parse_32_le(const void *buf)
 {
-       const __le32 *b = buf;
-
-       return le32_to_cpu(b[0]);
+       return get_unaligned_le32(buf);
 }
 
 static void regmap_parse_32_be_inplace(void *buf)
 {
-       __be32 *b = buf;
+       u32 v = get_unaligned_be32(buf);
 
-       b[0] = be32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_32_le_inplace(void *buf)
 {
-       __le32 *b = buf;
+       u32 v = get_unaligned_le32(buf);
 
-       b[0] = le32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_32_native(const void *buf)
 {
-       return *(u32 *)buf;
+       u32 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 #ifdef CONFIG_64BIT
 static unsigned int regmap_parse_64_be(const void *buf)
 {
-       const __be64 *b = buf;
-
-       return be64_to_cpu(b[0]);
+       return get_unaligned_be64(buf);
 }
 
 static unsigned int regmap_parse_64_le(const void *buf)
 {
-       const __le64 *b = buf;
-
-       return le64_to_cpu(b[0]);
+       return get_unaligned_le64(buf);
 }
 
 static void regmap_parse_64_be_inplace(void *buf)
 {
-       __be64 *b = buf;
+       u64 v = get_unaligned_be64(buf);
 
-       b[0] = be64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_64_le_inplace(void *buf)
 {
-       __le64 *b = buf;
+       u64 v = get_unaligned_le64(buf);
 
-       b[0] = le64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_64_native(const void *buf)
 {
-       return *(u64 *)buf;
+       u64 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 #endif
 
@@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map)
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
        kfree_const(map->name);
+       kfree(map->patch);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1371,7 +1364,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
 
        /* If the user didn't specify a name match any */
        if (data)
-               return (*r)->name == data;
+               return !strcmp((*r)->name, data);
        else
                return 1;
 }
@@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
  * @reg: Register to read from
  * @bits: Bits to test
  *
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
  */
 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
 {
index 43cff01..ce7e9f2 100644 (file)
@@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
             test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
-               sockfd_put(sock);
-               return -EBUSY;
+               err = -EBUSY;
+               goto put_socket;
+       }
+
+       nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+       if (!nsock) {
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        socks = krealloc(config->socks, (config->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks) {
-               sockfd_put(sock);
-               return -ENOMEM;
+               kfree(nsock);
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        config->socks = socks;
 
-       nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
-       if (!nsock) {
-               sockfd_put(sock);
-               return -ENOMEM;
-       }
-
        nsock->fallback_index = -1;
        nsock->dead = false;
        mutex_init(&nsock->tx_lock);
@@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        atomic_inc(&config->live_connections);
 
        return 0;
+
+put_socket:
+       sockfd_put(sock);
+       return err;
 }
 
 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
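
The nbd_add_socket() rework above also collapses the early returns into one put_socket exit so the socket reference taken earlier in the function is dropped exactly once on every failure. In sketch form (helper name and reduced scope are made up):

    /* Sketch of the single-exit error path. */
    static struct nbd_sock *setup_nsock(struct socket *sock)
    {
            struct nbd_sock *nsock;

            nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
            if (!nsock)
                    goto put_socket;

            nsock->sock = sock;             /* publish only after allocation succeeds */
            return nsock;

    put_socket:
            sockfd_put(sock);               /* drop the reference on every failure */
            return NULL;
    }
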
index 9d21bf0..980df85 100644 (file)
@@ -878,6 +878,7 @@ out_put_disk:
        put_disk(vblk->disk);
 out_free_vq:
        vdev->config->del_vqs(vdev);
+       kfree(vblk->vqs);
 out_free_vblk:
        kfree(vblk);
 out_free_index:
index 6e2ad90..270dd81 100644 (file)
@@ -2021,7 +2021,8 @@ static ssize_t hot_add_show(struct class *class,
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
-static CLASS_ATTR_RO(hot_add);
+static struct class_attribute class_attr_hot_add =
+       __ATTR(hot_add, 0400, hot_add_show, NULL);
 
 static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
index 3affd18..4f513fa 100644 (file)
@@ -221,6 +221,34 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
        return sysc_read(ddata, offset);
 }
 
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+       u32 sysc_mask, syss_done, rstval;
+       int syss_offset, error = 0;
+
+       syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+       sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+       if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+               syss_done = 0;
+       else
+               syss_done = ddata->cfg.syss_mask;
+
+       if (syss_offset >= 0) {
+               error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+                               rstval, (rstval & ddata->cfg.syss_mask) ==
+                               syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+
+       } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+               error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+                               rstval, !(rstval & sysc_mask),
+                               100, MAX_MODULE_SOFTRESET_WAIT);
+       }
+
+       return error;
+}
+
 static int sysc_add_named_clock_from_child(struct sysc *ddata,
                                           const char *name,
                                           const char *optfck_name)
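
sysc_wait_softreset() above centralizes the reset-status polling that both sysc_reset() and sysc_enable_module() now need. It is built on readx_poll_timeout_atomic() from <linux/iopoll.h>; a stripped-down usage sketch (function name, mask and timeout values here are arbitrary):

    #include <linux/iopoll.h>

    /* Busy-wait for the reset-done condition: re-read every 100 us, give up
     * after 10 ms. The macro evaluates sysc_read_sysstatus(ddata) into
     * rstval on each iteration and returns -ETIMEDOUT on expiry. */
    static int wait_reset_done(struct sysc *ddata, u32 done_mask)
    {
            u32 rstval;

            return readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, rstval,
                                             (rstval & done_mask) == done_mask,
                                             100, 10000);
    }
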
@@ -925,18 +953,47 @@ static int sysc_enable_module(struct device *dev)
        struct sysc *ddata;
        const struct sysc_regbits *regbits;
        u32 reg, idlemodes, best_mode;
+       int error;
 
        ddata = dev_get_drvdata(dev);
+
+       /*
+        * Some modules like DSS reset automatically on idle. Enable optional
+        * reset clocks and wait for OCP softreset to complete.
+        */
+       if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+               error = sysc_enable_opt_clocks(ddata);
+               if (error) {
+                       dev_err(ddata->dev,
+                               "Optional clocks failed for enable: %i\n",
+                               error);
+                       return error;
+               }
+       }
+       error = sysc_wait_softreset(ddata);
+       if (error)
+               dev_warn(ddata->dev, "OCP softreset timed out\n");
+       if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+               sysc_disable_opt_clocks(ddata);
+
+       /*
+        * Some subsystem private interconnects, like DSS top level module,
+        * need only the automatic OCP softreset handling with no sysconfig
+        * register bits to configure.
+        */
        if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
                return 0;
 
        regbits = ddata->cap->regbits;
        reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
 
-       /* Set CLOCKACTIVITY, we only use it for ick */
+       /*
+        * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+        * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+        * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+        */
        if (regbits->clkact_shift >= 0 &&
-           (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
-            ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+           (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
                reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
 
        /* Set SIDLE mode */
@@ -991,6 +1048,9 @@ set_autoidle:
                sysc_write_sysconfig(ddata, reg);
        }
 
+       /* Flush posted write */
+       sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
        if (ddata->module_enable_quirk)
                ddata->module_enable_quirk(ddata);
 
@@ -1071,6 +1131,9 @@ set_sidle:
                reg |= 1 << regbits->autoidle_shift;
        sysc_write_sysconfig(ddata, reg);
 
+       /* Flush posted write */
+       sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
        return 0;
 }
 
@@ -1215,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
 
        ddata = dev_get_drvdata(dev);
 
-       if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+       if (ddata->cfg.quirks &
+           (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
        return pm_runtime_force_suspend(dev);
@@ -1227,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
 
        ddata = dev_get_drvdata(dev);
 
-       if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+       if (ddata->cfg.quirks &
+           (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
        return pm_runtime_force_resume(dev);
@@ -1488,7 +1553,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
        bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
        const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
        int manager_count;
-       bool framedonetv_irq;
+       bool framedonetv_irq = true;
        u32 val, irq_mask = 0;
 
        switch (sysc_soc->soc) {
@@ -1505,6 +1570,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
                break;
        case SOC_AM4:
                manager_count = 1;
+               framedonetv_irq = false;
                break;
        case SOC_UNKNOWN:
        default:
@@ -1663,8 +1729,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
 
        local_irq_save(flags);
        /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
-       error = readl_poll_timeout(ddata->module_va + 0x44, val,
-                                  !(val & BIT(0)), 100, 50);
+       error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+                                         !(val & BIT(0)), 100, 50);
        if (error)
                dev_warn(ddata->dev, "rtc busy timeout\n");
        /* Now we have ~15 microseconds to read/write various registers */
@@ -1822,11 +1888,10 @@ static int sysc_legacy_init(struct sysc *ddata)
  */
 static int sysc_reset(struct sysc *ddata)
 {
-       int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
-       u32 sysc_mask, syss_done;
+       int sysc_offset, sysc_val, error;
+       u32 sysc_mask;
 
        sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
-       syss_offset = ddata->offsets[SYSC_SYSSTATUS];
 
        if (ddata->legacy_mode ||
            ddata->cap->regbits->srst_shift < 0 ||
@@ -1835,11 +1900,6 @@ static int sysc_reset(struct sysc *ddata)
 
        sysc_mask = BIT(ddata->cap->regbits->srst_shift);
 
-       if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
-               syss_done = 0;
-       else
-               syss_done = ddata->cfg.syss_mask;
-
        if (ddata->pre_reset_quirk)
                ddata->pre_reset_quirk(ddata);
 
@@ -1856,18 +1916,9 @@ static int sysc_reset(struct sysc *ddata)
        if (ddata->post_reset_quirk)
                ddata->post_reset_quirk(ddata);
 
-       /* Poll on reset status */
-       if (syss_offset >= 0) {
-               error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
-                                          (rstval & ddata->cfg.syss_mask) ==
-                                          syss_done,
-                                          100, MAX_MODULE_SOFTRESET_WAIT);
-
-       } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
-               error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
-                                          !(rstval & sysc_mask),
-                                          100, MAX_MODULE_SOFTRESET_WAIT);
-       }
+       error = sysc_wait_softreset(ddata);
+       if (error)
+               dev_warn(ddata->dev, "OCP softreset timed out\n");
 
        if (ddata->reset_done_quirk)
                ddata->reset_done_quirk(ddata);
index 35333b6..7c617ed 100644 (file)
@@ -210,7 +210,7 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client)
 
 /*
  * st33zp24_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 26e09de..a75dafd 100644 (file)
@@ -329,7 +329,7 @@ static int st33zp24_spi_request_resources(struct spi_device *dev)
 
 /*
  * st33zp24_spi_probe initialize the TPM device
- * @param: dev, the spi_device drescription (TPM SPI description).
+ * @param: dev, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  *      or a negative value describing the error.
  */
@@ -378,7 +378,7 @@ static int st33zp24_spi_probe(struct spi_device *dev)
 
 /*
  * st33zp24_spi_remove remove the TPM device
- * @param: client, the spi_device drescription (TPM SPI description).
+ * @param: client, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  */
 static int st33zp24_spi_remove(struct spi_device *dev)
index 37bb13f..4ec10ab 100644 (file)
@@ -502,7 +502,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
 
 /*
  * st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 87f4493..1784530 100644 (file)
@@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       /* atomic tpm command send and result receive. We only hold the ops
-        * lock during this period so that the tpm can be unregistered even if
-        * the char dev is held open.
-        */
-       if (tpm_try_get_ops(priv->chip)) {
-               ret = -EPIPE;
-               goto out;
-       }
-
        priv->response_length = 0;
        priv->response_read = false;
        *off = 0;
@@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
-               tpm_put_ops(priv->chip);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }
 
+       /* atomic tpm command send and result receive. We only hold the ops
+        * lock during this period so that the tpm can be unregistered even if
+        * the char dev is held open.
+        */
+       if (tpm_try_get_ops(priv->chip)) {
+               ret = -EPIPE;
+               goto out;
+       }
+
        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);
index 09fe452..994385b 100644 (file)
@@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
        if (rc)
                goto init_irq_cleanup;
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
-               chip->flags |= TPM_CHIP_FLAG_TPM2;
-               rc = tpm2_get_cc_attrs_tbl(chip);
-               if (rc)
-                       goto init_irq_cleanup;
-       }
-
        if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                                ibmvtpm->rtce_buf != NULL,
                                HZ)) {
@@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
+       if (!strcmp(id->compat, "IBM,vtpm20")) {
+               chip->flags |= TPM_CHIP_FLAG_TPM2;
+               rc = tpm2_get_cc_attrs_tbl(chip);
+               if (rc)
+                       goto init_irq_cleanup;
+       }
+
        return tpm_chip_register(chip);
 init_irq_cleanup:
        do {
index e7df342..0b21496 100644 (file)
@@ -235,6 +235,13 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
        return tpm_tis_init(&pnp_dev->dev, &tpm_info);
 }
 
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added the IFX0102 device ID, which is also used
+ * by tpm_infineon, but did not add a quirk to decide which driver ought to
+ * be used.
+ */
+
 static struct pnp_device_id tpm_pnp_tbl[] = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
index 2435216..65ab1b0 100644 (file)
@@ -1085,7 +1085,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 
        return 0;
 out_err:
-       if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+       if (chip->ops->clk_enable != NULL)
                chip->ops->clk_enable(chip, false);
 
        tpm_tis_remove(chip);
index d967559..3856f6e 100644 (file)
@@ -53,8 +53,6 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
 
        if ((phy->iobuf[3] & 0x01) == 0) {
                // handle SPI wait states
-               phy->iobuf[0] = 0;
-
                for (i = 0; i < TPM_RETRY; i++) {
                        spi_xfer->len = 1;
                        spi_message_init(&m);
@@ -104,6 +102,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                if (ret < 0)
                        goto exit;
 
+               /* Flow control transfers are receive only */
+               spi_xfer.tx_buf = NULL;
                ret = phy->flow_control(phy, &spi_xfer);
                if (ret < 0)
                        goto exit;
@@ -113,9 +113,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                spi_xfer.delay.value = 5;
                spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
 
-               if (in) {
-                       spi_xfer.tx_buf = NULL;
-               } else if (out) {
+               if (out) {
+                       spi_xfer.tx_buf = phy->iobuf;
                        spi_xfer.rx_buf = NULL;
                        memcpy(phy->iobuf, out, transfer_len);
                        out += transfer_len;
@@ -288,6 +287,7 @@ static struct spi_driver tpm_tis_spi_driver = {
                .pm = &tpm_tis_pm,
                .of_match_table = of_match_ptr(of_tis_spi_match),
                .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
        .probe = tpm_tis_spi_driver_probe,
        .remove = tpm_tis_spi_remove,
index 00c5e3a..ca691bc 100644 (file)
@@ -2116,6 +2116,7 @@ static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
        { 0 },
 };
+MODULE_DEVICE_TABLE(virtio, id_table);
 
 static unsigned int features[] = {
        VIRTIO_CONSOLE_F_SIZE,
@@ -2128,6 +2129,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
 #endif
        { 0 },
 };
+MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
 
 static unsigned int rproc_serial_features[] = {
 };
@@ -2280,6 +2282,5 @@ static void __exit fini(void)
 module_init(init);
 module_exit(fini);
 
-MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio console driver");
 MODULE_LICENSE("GPL");
index 69934c0..326f91b 100644 (file)
@@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig"
 config CLK_HSDK
        bool "PLL Driver for HSDK platform"
        depends on OF || COMPILE_TEST
+       depends on IOMEM
        help
          This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
          control.
index 99afc94..177368c 100644 (file)
@@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = {
        { 0 }
 };
 
+static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
+       { 0x0, 2 },
+       { 0x1, 4 },
+       { 0x2, 6 },
+       { 0x3, 8 },
+       { 0x4, 10 },
+       { 0x5, 12 },
+       { 0x6, 14 },
+       { 0x7, 16 },
+       { 0 }
+};
+
 static const struct clk_div_table ast2600_mac_div_table[] = {
        { 0x0, 4 },
        { 0x1, 4 },
@@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
        return hw;
 }
 
+static const char *const emmc_extclk_parent_names[] = {
+       "emmc_extclk_hpll_in",
+       "mpll",
+};
+
 static const char * const vclk_parent_names[] = {
        "dpll",
        "d1pll",
@@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
                return PTR_ERR(hw);
        aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;
 
-       /* EMMC ext clock divider */
-       hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0,
-                       scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0,
-                       &aspeed_g6_clk_lock);
+       /* EMMC ext clock */
+       hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
+                                         0, 1, 2);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
-       hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0,
-                       scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0,
-                       ast2600_div_table,
-                       &aspeed_g6_clk_lock);
+
+       hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
+                                emmc_extclk_parent_names,
+                                ARRAY_SIZE(emmc_extclk_parent_names), 0,
+                                scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
+                                0, &aspeed_g6_clk_lock);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
+                                 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
+                                 15, 0, &aspeed_g6_clk_lock);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       hw = clk_hw_register_divider_table(dev, "emmc_extclk",
+                                          "emmc_extclk_gate", 0,
+                                          scu_g6_base +
+                                               ASPEED_G6_CLK_SELECTION1, 12,
+                                          3, 0, ast2600_emmc_extclk_div_table,
+                                          &aspeed_g6_clk_lock);
        if (IS_ERR(hw))
                return PTR_ERR(hw);
        aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
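
For orientation, the registration sequence above replaces the old gate+divider pair with a four-stage path; schematically (a sketch of the chain as registered, bit positions taken from the calls above):

    /*
     * hpll --(fixed /2)--> emmc_extclk_hpll_in --+
     *                                            +-- emmc_extclk_mux (SELECTION1 bit 11)
     * mpll ---------------------------------------+            |
     *                                       emmc_extclk_gate (bit 15)
     *                                                          |
     *                                  divider (bits 14:12, /2 .. /16 per table)
     *                                                          |
     *                                                     emmc_extclk
     */
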
index ded07b0..557d621 100644 (file)
@@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON
 
 config ARMADA_AP_CPU_CLK
        bool
+       select ARMADA_AP_CP_HELPER
 
 config ARMADA_CP110_SYSCON
        bool
index 6282ee2..a8901f9 100644 (file)
@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
        struct __prci_data *pd;
        int r;
 
-       pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+       pd = devm_kzalloc(dev,
+                         struct_size(pd, hw_clks.hws,
+                                     ARRAY_SIZE(__prci_init_clocks)),
+                         GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
 
index ecf7b7d..6c3e841 100644 (file)
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+       {
+               .match_type = ate_match_local_cap_id,
+               .id = (void *)ARM64_WORKAROUND_1418040,
+               .desc = "ARM erratum 1418040",
+               .disable_compat_vdso = true,
+       },
+#endif
 };
 
 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
        if (wa->read_cntvct_el0) {
                clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
                vdso_default = VDSO_CLOCKMODE_NONE;
+       } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+               vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+               clocksource_counter.vdso_clock_mode = vdso_default;
        }
 }
 
index aa13708..d22cfae 100644 (file)
@@ -1274,18 +1274,26 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
                                             struct counter_signal *signal,
                                             void *private, char *buf)
 {
-       const struct quad8_iio *const priv = counter->priv;
+       struct quad8_iio *const priv = counter->priv;
        const size_t channel_id = signal->id / 2;
-       const bool disabled = !(priv->cable_fault_enable & BIT(channel_id));
+       bool disabled;
        unsigned int status;
        unsigned int fault;
 
-       if (disabled)
+       mutex_lock(&priv->lock);
+
+       disabled = !(priv->cable_fault_enable & BIT(channel_id));
+
+       if (disabled) {
+               mutex_unlock(&priv->lock);
                return -EINVAL;
+       }
 
        /* Logic 0 = cable fault */
        status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
 
+       mutex_unlock(&priv->lock);
+
        /* Mask respective channel and invert logic */
        fault = !(status & BIT(channel_id));
 
@@ -1317,6 +1325,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(
        if (ret)
                return ret;
 
+       mutex_lock(&priv->lock);
+
        if (enable)
                priv->cable_fault_enable |= BIT(channel_id);
        else
@@ -1327,6 +1337,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(
 
        outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
 
+       mutex_unlock(&priv->lock);
+
        return len;
 }
 
@@ -1353,6 +1365,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
        if (ret)
                return ret;
 
+       mutex_lock(&priv->lock);
+
        priv->fck_prescaler[channel_id] = prescaler;
 
        /* Reset Byte Pointer */
@@ -1363,6 +1377,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
        outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
             base_offset + 1);
 
+       mutex_unlock(&priv->lock);
+
        return len;
 }
 
index 8e23a69..7e0f788 100644 (file)
@@ -2464,7 +2464,7 @@ static struct cpufreq_driver intel_cpufreq = {
        .name           = "intel_cpufreq",
 };
 
-static struct cpufreq_driver *default_driver = &intel_pstate;
+static struct cpufreq_driver *default_driver;
 
 static void intel_pstate_driver_cleanup(void)
 {
@@ -2677,6 +2677,8 @@ static struct acpi_platform_list plat_info[] __initdata = {
        { } /* End */
 };
 
+#define BITMASK_OOB    (BIT(8) | BIT(18))
+
 static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 {
        const struct x86_cpu_id *id;
@@ -2686,8 +2688,9 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
                rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
-               if (misc_pwr & (1 << 8)) {
-                       pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
+               if (misc_pwr & BITMASK_OOB) {
+                       pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
+                       pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
                        return true;
                }
        }
@@ -2755,6 +2758,7 @@ static int __init intel_pstate_init(void)
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
+                       default_driver = &intel_pstate;
                        goto hwp_cpu_matched;
                }
        } else {
@@ -2772,7 +2776,8 @@ static int __init intel_pstate_init(void)
                return -ENODEV;
        }
        /* Without HWP start in the passive mode. */
-       default_driver = &intel_cpufreq;
+       if (!default_driver)
+               default_driver = &intel_cpufreq;
 
 hwp_cpu_matched:
        /*
@@ -2817,6 +2822,8 @@ static int __init intel_pstate_setup(char *str)
 
        if (!strcmp(str, "disable")) {
                no_load = 1;
+       } else if (!strcmp(str, "active")) {
+               default_driver = &intel_pstate;
        } else if (!strcmp(str, "passive")) {
                default_driver = &intel_cpufreq;
                no_hwp = 1;
index c149d9e..8719731 100644 (file)
@@ -186,9 +186,10 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, U64_MAX, 0, true);
-       if (index > 0)
+       if (index > 0) {
                enter_s2idle_proper(drv, dev, index);
-
+               local_irq_enable();
+       }
        return index;
 }
 #endif /* CONFIG_SUSPEND */
index e018ef8..1699a8e 100644 (file)
@@ -45,46 +45,20 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
        size_t ret = 0;
 
        dmabuf = dentry->d_fsdata;
-       dma_resv_lock(dmabuf->resv, NULL);
+       spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
-       dma_resv_unlock(dmabuf->resv);
+       spin_unlock(&dmabuf->name_lock);
 
        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
                             dentry->d_name.name, ret > 0 ? name : "");
 }
 
-static const struct dentry_operations dma_buf_dentry_ops = {
-       .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
-       struct pseudo_fs_context *ctx;
-
-       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
-       if (!ctx)
-               return -ENOMEM;
-       ctx->dops = &dma_buf_dentry_ops;
-       return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
-       .name = "dmabuf",
-       .init_fs_context = dma_buf_fs_init_context,
-       .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
 {
        struct dma_buf *dmabuf;
 
-       if (!is_dma_buf_file(file))
-               return -EINVAL;
-
-       dmabuf = file->private_data;
+       dmabuf = dentry->d_fsdata;
 
        BUG_ON(dmabuf->vmapping_counter);
 
@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+       .d_dname = dmabuffs_dname,
+       .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+       struct pseudo_fs_context *ctx;
+
+       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->dops = &dma_buf_dentry_ops;
        return 0;
 }
 
+static struct file_system_type dma_buf_fs_type = {
+       .name = "dmabuf",
+       .init_fs_context = dma_buf_fs_init_context,
+       .kill_sb = kill_anon_super,
+};
+
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
        struct dma_buf *dmabuf;
@@ -341,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
                kfree(name);
                goto out_unlock;
        }
+       spin_lock(&dmabuf->name_lock);
        kfree(dmabuf->name);
        dmabuf->name = name;
+       spin_unlock(&dmabuf->name_lock);
 
 out_unlock:
        dma_resv_unlock(dmabuf->resv);
@@ -405,14 +404,13 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
        /* Don't count the temporary reference taken inside procfs seq_show */
        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
-       dma_resv_lock(dmabuf->resv, NULL);
+       spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                seq_printf(m, "name:\t%s\n", dmabuf->name);
-       dma_resv_unlock(dmabuf->resv);
+       spin_unlock(&dmabuf->name_lock);
 }
 
 static const struct file_operations dma_buf_fops = {
-       .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
@@ -546,6 +544,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
+       spin_lock_init(&dmabuf->name_lock);
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
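
The name_lock introduced above lets ->d_dname() and the fdinfo path read dmabuf->name without taking the (sleeping) reservation lock. A condensed sketch of the writer side of that rule (hypothetical helper name; mirrors the dma_buf_set_name() hunk):

    /* Sketch: every access to dmabuf->name now nests only under name_lock. */
    static void dma_buf_rename(struct dma_buf *dmabuf, char *new_name)
    {
            spin_lock(&dmabuf->name_lock);
            kfree(dmabuf->name);            /* kfree(NULL) is a no-op */
            dmabuf->name = new_name;
            spin_unlock(&dmabuf->name_lock);
    }
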
index b175229..604f803 100644 (file)
@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
        } else if (dmatest_run) {
                if (!is_threaded_test_pending(info)) {
                        pr_info("No channels configured, continue with any\n");
+                       if (!is_threaded_test_run(info))
+                               stop_threaded_test(info);
                        add_threaded_test(info);
                }
                start_threaded_tests(info);
index 21cb2a5..a1b56f5 100644 (file)
@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 {
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 
-       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
-               return;
-
        dw->initialize_chan(dwc);
 
        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
-       set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
 }
 
 /*----------------------------------------------------------------------*/
@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan)
 
 void do_dw_dma_off(struct dw_dma *dw)
 {
-       unsigned int i;
-
        dma_writel(dw, CFG, 0);
 
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw)
 
        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();
-
-       for (i = 0; i < dw->dma.chancnt; i++)
-               clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
 }
 
 void do_dw_dma_on(struct dw_dma *dw)
@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        /* Clear custom channel configuration */
        memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
 
-       clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
-
        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
        channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
index 5697c36..930ae26 100644 (file)
@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
-        * big- or little-endian obeying the eDMA engine model endian.
+        * big- or little-endian obeying the eDMA engine model endian,
+        * and this is performed from specific edma_write functions
         */
        edma_writew(edma, 0,  &regs->tcd[ch].csr);
-       edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
-       edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
 
-       edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
-       edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+       edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
+       edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
 
-       edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
-       edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+       edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
+       edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
 
-       edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
-       edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
-       edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+       edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
+       edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
 
-       edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+       edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
+       edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
+       edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+
+       edma_writel(edma, (s32)tcd->dlast_sga,
                        &regs->tcd[ch].dlast_sga);
 
-       edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+       edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
 }
 
 static inline
@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
 {
        struct virt_dma_desc *vdesc;
 
+       lockdep_assert_held(&fsl_chan->vchan.lock);
+
        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
index 67e4225..ec11697 100644 (file)
@@ -33,7 +33,7 @@
 #define EDMA_TCD_ATTR_DSIZE_16BIT      BIT(0)
 #define EDMA_TCD_ATTR_DSIZE_32BIT      BIT(1)
 #define EDMA_TCD_ATTR_DSIZE_64BIT      (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE     (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE     (BIT(2) | BIT(0))
 #define EDMA_TCD_ATTR_SSIZE_8BIT       0
 #define EDMA_TCD_ATTR_SSIZE_16BIT      (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
 #define EDMA_TCD_ATTR_SSIZE_32BIT      (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
index eff7ebd..90bb72a 100644 (file)
@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
                        fsl_chan = &fsl_edma->chans[ch];
 
                        spin_lock(&fsl_chan->vchan.lock);
+
+                       if (!fsl_chan->edesc) {
+                               /* terminate_all called before */
+                               spin_unlock(&fsl_chan->vchan.lock);
+                               continue;
+                       }
+
                        if (!fsl_chan->edesc->iscyclic) {
                                list_del(&fsl_chan->edesc->vdesc.node);
                                vchan_cookie_complete(&fsl_chan->edesc->vdesc);
index ff49847..cb376cf 100644 (file)
@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
        struct idxd_device *idxd;
        struct idxd_wq *wq;
        struct device *dev;
+       int rc = 0;
 
        wq = inode_wq(inode);
        idxd = wq->idxd;
@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 
        dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
 
-       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
-               return -EBUSY;
-
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
+       mutex_lock(&wq->wq_lock);
+
+       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
+               rc = -EBUSY;
+               goto failed;
+       }
+
        ctx->wq = wq;
        filp->private_data = ctx;
        idxd_wq_get(wq);
+       mutex_unlock(&wq->wq_lock);
        return 0;
+
+ failed:
+       mutex_unlock(&wq->wq_lock);
+       kfree(ctx);
+       return rc;
 }
 
 static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
        filep->private_data = NULL;
 
        kfree(ctx);
+       mutex_lock(&wq->wq_lock);
        idxd_wq_put(wq);
+       mutex_unlock(&wq->wq_lock);
        return 0;
 }
 
index 8d79a87..8d2718c 100644 (file)
@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
        devm_iounmap(dev, wq->dportal);
 }
 
+void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int i, wq_offset;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+       wq->type = IDXD_WQT_NONE;
+       wq->size = 0;
+       wq->group = NULL;
+       wq->threshold = 0;
+       wq->priority = 0;
+       clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+       memset(wq->name, 0, WQ_NAME_SIZE);
+
+       for (i = 0; i < 8; i++) {
+               wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+               iowrite32(0, idxd->reg_base + wq_offset);
+               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+                       wq->id, i, wq_offset,
+                       ioread32(idxd->reg_base + wq_offset));
+       }
+}
+
 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)
 {
index b8f8a36..908c8d0 100644 (file)
@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
+void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
index 6510791..8a35f58 100644 (file)
@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 
        iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
        if (!err)
-               return IRQ_HANDLED;
+               goto out;
 
        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
                spin_unlock_bh(&idxd->dev_lock);
        }
 
+ out:
        idxd_unmask_msix_vector(idxd, irq_entry->id);
        return IRQ_HANDLED;
 }
index 052dae5..2e2c508 100644 (file)
@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev)
                idxd_unregister_dma_device(idxd);
                spin_lock_irqsave(&idxd->dev_lock, flags);
                rc = idxd_device_disable(idxd);
+               for (i = 0; i < idxd->max_wqs; i++) {
+                       struct idxd_wq *wq = &idxd->wqs[i];
+
+                       idxd_wq_disable_cleanup(wq);
+               }
                spin_unlock_irqrestore(&idxd->dev_lock, flags);
                module_put(THIS_MODULE);
                if (rc < 0)
index 9177403..270992c 100644 (file)
@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        sdma_channel_synchronize(chan);
 
-       if (sdmac->event_id0 >= 0)
-               sdma_event_disable(sdmac, sdmac->event_id0);
+       sdma_event_disable(sdmac, sdmac->event_id0);
        if (sdmac->event_id1)
                sdma_event_disable(sdmac, sdmac->event_id1);
 
@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan,
        memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 
        /* Set ENBLn earlier to make sure dma request triggered after that */
-       if (sdmac->event_id0 >= 0) {
-               if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-                       return -EINVAL;
-               sdma_event_enable(sdmac, sdmac->event_id0);
-       }
+       if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+               return -EINVAL;
+       sdma_event_enable(sdmac, sdmac->event_id0);
 
        if (sdmac->event_id1) {
                if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
index 8ad0ad8..fd782ae 100644 (file)
 
 #include "../dmaengine.h"
 
+int completion_timeout = 200;
+module_param(completion_timeout, int, 0644);
+MODULE_PARM_DESC(completion_timeout,
+               "set ioat completion timeout [msec] (default 200 [msec])");
+int idle_timeout = 2000;
+module_param(idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout,
+               "set ioat idel timeout [msec] (default 2000 [msec])");
+
+#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
+#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
+
 static char *chanerr_str[] = {
        "DMA Transfer Source Address Error",
        "DMA Transfer Destination Address Error",
index e6b622e..f7f31fd 100644 (file)
@@ -104,8 +104,6 @@ struct ioatdma_chan {
        #define IOAT_RUN 5
        #define IOAT_CHAN_ACTIVE 6
        struct timer_list timer;
-       #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
-       #define IDLE_TIMEOUT msecs_to_jiffies(2000)
        #define RESET_DELAY msecs_to_jiffies(100)
        struct ioatdma_device *ioat_dma;
        dma_addr_t completion_dma;
index e15bd15..e12b754 100644 (file)
@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
                        mcf_chan = &mcf_edma->chans[ch];
 
                        spin_lock(&mcf_chan->vchan.lock);
+
+                       if (!mcf_chan->edesc) {
+                               /* terminate_all called before */
+                               spin_unlock(&mcf_chan->vchan.lock);
+                               continue;
+                       }
+
                        if (!mcf_chan->edesc->iscyclic) {
                                list_del(&mcf_chan->edesc->vdesc.node);
                                vchan_cookie_complete(&mcf_chan->edesc->vdesc);
index b218a01..8f7ceb6 100644 (file)
@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
                desc->residue = usb_dmac_get_current_residue(chan, desc,
                                                        desc->sg_index - 1);
                desc->done_cookie = desc->vd.tx.cookie;
+               desc->vd.tx_result.result = DMA_TRANS_NOERROR;
+               desc->vd.tx_result.residue = desc->residue;
                vchan_cookie_complete(&desc->vd);
 
                /* Restart the next transfer if this driver has a next desc */
index db58d7e..c5fa2ef 100644 (file)
@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
 
        ret = pm_runtime_get_sync(tdc2dev(tdc));
        if (ret < 0) {
+               pm_runtime_put_noidle(tdc2dev(tdc));
                free_irq(tdc->irq, tdc);
                return ret;
        }
@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
 
        ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(&pdev->dev);
                goto rpm_disable;
+       }
 
        ret = tegra_adma_init(tdma);
        if (ret)
index 0b8f3dd..77e8e67 100644 (file)
@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
        ud = platform_get_drvdata(pdev);
        if (!ud) {
                pr_debug("UDMA has not been probed\n");
+               put_device(&pdev->dev);
                return ERR_PTR(-EPROBE_DEFER);
        }
 
index c91e2dc..6c879a7 100644 (file)
@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        dev_err(ud->ddev.dev,
                                "Descriptor pool allocation failed\n");
                        uc->use_dma_pool = false;
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_cleanup;
                }
        }
 
@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
                ret = udma_get_chan_pair(uc);
                if (ret)
-                       return ret;
+                       goto err_cleanup;
 
                ret = udma_alloc_tx_resources(uc);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       udma_put_rchan(uc);
+                       goto err_cleanup;
+               }
 
                ret = udma_alloc_rx_resources(uc);
                if (ret) {
                        udma_free_tx_resources(uc);
-                       return ret;
+                       goto err_cleanup;
                }
 
                uc->config.src_thread = ud->psil_base + uc->tchan->id;
@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        uc->id);
 
                ret = udma_alloc_tx_resources(uc);
-               if (ret) {
-                       uc->config.remote_thread_id = -1;
-                       return ret;
-               }
+               if (ret)
+                       goto err_cleanup;
 
                uc->config.src_thread = ud->psil_base + uc->tchan->id;
                uc->config.dst_thread = uc->config.remote_thread_id;
@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        uc->id);
 
                ret = udma_alloc_rx_resources(uc);
-               if (ret) {
-                       uc->config.remote_thread_id = -1;
-                       return ret;
-               }
+               if (ret)
+                       goto err_cleanup;
 
                uc->config.src_thread = uc->config.remote_thread_id;
                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                /* Can not happen */
                dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
                        __func__, uc->id, uc->config.dir);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_cleanup;
+
        }
 
        /* check if the channel configuration was successful */
@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
        if (udma_is_chan_running(uc)) {
                dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
-               udma_stop(uc);
+               udma_reset_chan(uc, false);
                if (udma_is_chan_running(uc)) {
                        dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
                        ret = -EBUSY;
@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 
        udma_reset_rings(uc);
 
-       INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
-                                 udma_check_tx_completion);
        return 0;
 
 err_irq_free:
@@ -1919,7 +1918,7 @@ err_psi_free:
 err_res_free:
        udma_free_tx_resources(uc);
        udma_free_rx_resources(uc);
-
+err_cleanup:
        udma_reset_uchan(uc);
 
        if (uc->use_dma_pool) {
@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan)
        }
 
        cancel_delayed_work_sync(&uc->tx_drain.work);
-       destroy_delayed_work_on_stack(&uc->tx_drain.work);
 
        if (uc->irq_num_ring > 0) {
                free_irq(uc->irq_num_ring, uc);
@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+       ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
        if (!ret && ud->atype > 2) {
                dev_err(dev, "Invalid atype: %u\n", ud->atype);
                return -EINVAL;
@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev)
                tasklet_init(&uc->vc.task, udma_vchan_complete,
                             (unsigned long)&uc->vc);
                init_completion(&uc->teardown_completed);
+               INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
        }
 
        ret = dma_async_device_register(&ud->ddev);
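
The udma_alloc_chan_resources() rework routes every failure through shared cleanup labels instead of returning with half-initialised channel state. Reduced to a generic, hedged sketch (structure and names are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_chan {
        void *tx;
        void *rx;
};

/* Acquire resources in order; unwind in reverse through goto labels on failure. */
static int demo_alloc_chan(struct demo_chan *c)
{
        int ret;

        c->tx = kzalloc(64, GFP_KERNEL);
        if (!c->tx) {
                ret = -ENOMEM;
                goto err_cleanup;
        }

        c->rx = kzalloc(64, GFP_KERNEL);
        if (!c->rx) {
                ret = -ENOMEM;
                goto err_free_tx;
        }

        return 0;

err_free_tx:
        kfree(c->tx);
        c->tx = NULL;
err_cleanup:
        /* reset any configuration set up before the failure */
        return ret;
}
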
index ef90070..6262f63 100644 (file)
@@ -269,6 +269,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
 
                if (pvt->model == 0x60)
                        amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+               else
+                       amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
        } else {
                amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
        }
index e6fc022..3939699 100644 (file)
@@ -278,3 +278,14 @@ config EFI_EARLYCON
        depends on SERIAL_EARLYCON && !ARM && !IA64
        select FONT_SUPPORT
        select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+       bool "Load custom ACPI SSDT overlay from an EFI variable"
+       depends on EFI_VARS && ACPI
+       default ACPI_TABLE_UPGRADE
+       help
+         Allow loading of an ACPI SSDT overlay from an EFI variable specified
+         by a kernel command line option.
+
+         See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+         information.
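
The new Kconfig symbol is then used as a compile-time guard, as the #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS change in the efi.c hunk below shows. A rough sketch of how code is typically gated on such a symbol (function names are illustrative):

#include <linux/kconfig.h>
#include <linux/printk.h>

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
static void demo_load_ssdt_overlay(void)
{
        pr_info("loading SSDT overlay from EFI variable\n");
}
#else
static inline void demo_load_ssdt_overlay(void) { }
#endif

static void demo_efi_init_late(void)
{
        /* IS_ENABLED() is the alternative when the code must always compile */
        if (IS_ENABLED(CONFIG_EFI_CUSTOM_SSDT_OVERLAYS))
                pr_debug("custom SSDT overlays enabled\n");

        demo_load_ssdt_overlay();
}
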
index c697e70..71c445d 100644 (file)
@@ -52,9 +52,11 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
 }
 
 static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
 
 static const efi_config_table_type_t arch_tables[] __initconst = {
        {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+       {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
        {}
 };
 
@@ -62,7 +64,8 @@ static void __init init_screen_info(void)
 {
        struct screen_info *si;
 
-       if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+       if (IS_ENABLED(CONFIG_ARM) &&
+           screen_info_table != EFI_INVALID_TABLE_ADDR) {
                si = early_memremap_ro(screen_info_table, sizeof(*si));
                if (!si) {
                        pr_err("Could not map screen_info config table\n");
@@ -116,7 +119,8 @@ static int __init uefi_init(u64 efi_system_table)
                goto out;
        }
        retval = efi_config_parse_tables(config_tables, systab->nr_tables,
-                                        arch_tables);
+                                        IS_ENABLED(CONFIG_ARM) ? arch_tables
+                                                               : NULL);
 
        early_memunmap(config_tables, table_size);
 out:
@@ -238,9 +242,37 @@ void __init efi_init(void)
 
        init_screen_info();
 
+#ifdef CONFIG_ARM
        /* ARM does not permit early mappings to persist across paging_init() */
-       if (IS_ENABLED(CONFIG_ARM))
-               efi_memmap_unmap();
+       efi_memmap_unmap();
+
+       if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+               struct efi_arm_entry_state *state;
+               bool dump_state = true;
+
+               state = early_memremap_ro(cpu_state_table,
+                                         sizeof(struct efi_arm_entry_state));
+               if (state == NULL) {
+                       pr_warn("Unable to map CPU entry state table.\n");
+                       return;
+               }
+
+               if ((state->sctlr_before_ebs & 1) == 0)
+                       pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+               else if ((state->sctlr_after_ebs & 1) == 0)
+                       pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+               else
+                       dump_state = false;
+
+               if (dump_state || efi_enabled(EFI_DBG)) {
+                       pr_info("CPSR at EFI stub entry        : 0x%08x\n", state->cpsr_before_ebs);
+                       pr_info("SCTLR at EFI stub entry       : 0x%08x\n", state->sctlr_before_ebs);
+                       pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
+                       pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
+               }
+               early_memunmap(state, sizeof(struct efi_arm_entry_state));
+       }
+#endif
 }
 
 static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
index 7f1657b..5114cae 100644 (file)
@@ -189,7 +189,7 @@ static void generic_ops_unregister(void)
        efivars_unregister(&generic_efivars);
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
 #define EFIVAR_SSDT_NAME_MAX   16
 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
 static int __init efivar_ssdt_setup(char *str)
@@ -622,7 +622,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
                        rsv = (void *)(p + prsv % PAGE_SIZE);
 
                        /* reserve the entry itself */
-                       memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+                       memblock_reserve(prsv,
+                                        struct_size(rsv, entry, rsv->size));
 
                        for (i = 0; i < atomic_read(&rsv->count); i++) {
                                memblock_reserve(rsv->entry[i].base,
index e3d6926..d591527 100644 (file)
@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
                rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
                                          "entry%d", entry_num);
                if (rc) {
-                       kfree(entry);
+                       kobject_put(&entry->kobj);
                        return rc;
                }
        }
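
The esrt fix swaps kfree() for kobject_put(): once kobject_init_and_add() has run, the kobject's release callback owns the allocation, so a failed add must be unwound through the refcount. A hedged sketch of that rule (types and names are illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_entry {
        struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct demo_entry, kobj));
}

static struct kobj_type demo_ktype = {
        .release = demo_release,
};

static int demo_add_entry(struct kobject *parent, int num)
{
        struct demo_entry *entry;
        int rc;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        rc = kobject_init_and_add(&entry->kobj, &demo_ktype, parent,
                                  "entry%d", num);
        if (rc)
                /* never kfree() here: let the release callback free it */
                kobject_put(&entry->kobj);

        return rc;
}
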
index 75daaf2..4cce372 100644 (file)
@@ -6,7 +6,8 @@
 # enabled, even if doing so doesn't break the build.
 #
 cflags-$(CONFIG_X86_32)                := -march=i386
-cflags-$(CONFIG_X86_64)                := -mcmodel=small
+cflags-$(CONFIG_X86_64)                := -mcmodel=small \
+                                  $(call cc-option,-maccumulate-outgoing-args)
 cflags-$(CONFIG_X86)           += -m$(BITS) -D__KERNEL__ \
                                   -fPIC -fno-strict-aliasing -mno-red-zone \
                                   -mno-mmx -mno-sse -fshort-wchar \
index 40243f5..d08e5d5 100644 (file)
@@ -7,10 +7,49 @@
 
 #include "efistub.h"
 
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+       asm("mrs %0, cpsr" : "=r"(*cpsr));
+       if ((*cpsr & MODE_MASK) == HYP_MODE)
+               asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+       else
+               asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
 efi_status_t check_platform_features(void)
 {
+       efi_status_t status;
+       u32 cpsr, sctlr;
        int block;
 
+       get_cpu_state(&cpsr, &sctlr);
+
+       efi_info("Entering in %s mode with MMU %sabled\n",
+                ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+                (sctlr & 1) ? "en" : "dis");
+
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                            sizeof(*efi_entry_state),
+                            (void **)&efi_entry_state);
+       if (status != EFI_SUCCESS) {
+               efi_err("allocate_pool() failed\n");
+               return status;
+       }
+
+       efi_entry_state->cpsr_before_ebs = cpsr;
+       efi_entry_state->sctlr_before_ebs = sctlr;
+
+       status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+                            efi_entry_state);
+       if (status != EFI_SUCCESS) {
+               efi_err("install_configuration_table() failed\n");
+               goto free_state;
+       }
+
        /* non-LPAE kernels can run anywhere */
        if (!IS_ENABLED(CONFIG_ARM_LPAE))
                return EFI_SUCCESS;
@@ -19,9 +58,22 @@ efi_status_t check_platform_features(void)
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block < 5) {
                efi_err("This LPAE kernel is not supported by your CPU\n");
-               return EFI_UNSUPPORTED;
+               status = EFI_UNSUPPORTED;
+               goto drop_table;
        }
        return EFI_SUCCESS;
+
+drop_table:
+       efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+       efi_bs_call(free_pool, efi_entry_state);
+       return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+       get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+                     &efi_entry_state->sctlr_after_ebs);
 }
 
 static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
index 7f6a57d..e5bfac7 100644 (file)
@@ -35,13 +35,16 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN
- * (which accounts for the alignment of statically allocated objects such as
- * the swapper stack.)
+ * Although relocatable kernels can fix up the misalignment with respect to
+ * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
+ * sync with those recorded in the vmlinux when kaslr is disabled but the
+ * image required relocation anyway. Therefore retain 2M alignment unless
+ * KASLR is in use.
  */
-static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN
-                                                                : MIN_KIMG_ALIGN;
+static u64 min_kimg_align(void)
+{
+       return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+}
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
                                 unsigned long *image_size,
@@ -74,21 +77,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
 
        kernel_size = _edata - _text;
        kernel_memsize = kernel_size + (_end - _edata);
-       *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align;
+       *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align();
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
                /*
                 * If KASLR is enabled, and we have some randomness available,
                 * locate the kernel at a randomized offset in physical memory.
                 */
-               status = efi_random_alloc(*reserve_size, min_kimg_align,
+               status = efi_random_alloc(*reserve_size, min_kimg_align(),
                                          reserve_addr, phys_seed);
        } else {
                status = EFI_OUT_OF_RESOURCES;
        }
 
        if (status != EFI_SUCCESS) {
-               if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) {
+               if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) {
                        /*
                         * Just execute from wherever we were loaded by the
                         * UEFI PE/COFF loader if the alignment is suitable.
@@ -99,7 +102,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
 
                status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-                                                   ULONG_MAX, min_kimg_align);
+                                                   ULONG_MAX, min_kimg_align());
 
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to relocate kernel\n");
@@ -108,7 +111,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
        }
 
-       *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align;
+       *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align();
        memcpy((void *)*image_addr, _text, kernel_size);
 
        return EFI_SUCCESS;
index 89f0752..6bca70b 100644 (file)
@@ -19,7 +19,7 @@
 #include "efistub.h"
 
 bool efi_nochunk;
-bool efi_nokaslr;
+bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
 bool efi_noinitrd;
 int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
 bool efi_novamap;
@@ -32,6 +32,10 @@ bool __pure __efi_soft_reserve_enabled(void)
        return !efi_nosoftreserve;
 }
 
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str:       UCS-2 encoded string
+ */
 void efi_char16_puts(efi_char16_t *str)
 {
        efi_call_proto(efi_table_attr(efi_system_table, con_out),
@@ -83,6 +87,10 @@ u32 utf8_to_utf32(const u8 **s8)
        return c32;
 }
 
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str:       UTF-8 encoded string
+ */
 void efi_puts(const char *str)
 {
        efi_char16_t buf[128];
@@ -113,6 +121,16 @@ void efi_puts(const char *str)
        }
 }
 
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt:       format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less than the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return:     number of printed characters
+ */
 int efi_printk(const char *fmt, ...)
 {
        char printf_buf[256];
@@ -154,13 +172,18 @@ int efi_printk(const char *fmt, ...)
        return printed;
 }
 
-/*
- * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline:   kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
  *
  * It should be noted that efi= is parsed in two very different
  * environments, first in the early boot environment of the EFI boot
  * stub, and subsequently during the kernel boot.
+ *
+ * Return:     status code
  */
 efi_status_t efi_parse_options(char const *cmdline)
 {
@@ -286,13 +309,21 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
        return (char *)cmdline_addr;
 }
 
-/*
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle:    handle of the exiting image
+ * @map:       pointer to receive the memory map
+ * @priv:      argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
  * Handle calling ExitBootServices according to the requirements set out by the
  * spec.  Obtains the current memory map, and returns that info after calling
  * ExitBootServices.  The client must specify a function to perform any
  * processing of the memory map data prior to ExitBootServices.  A client
  * specific structure may be passed to the function via priv.  The client
  * function may be called multiple times.
+ *
+ * Return:     status code
  */
 efi_status_t efi_exit_boot_services(void *handle,
                                    struct efi_boot_memmap *map,
@@ -361,6 +392,11 @@ fail:
        return status;
 }
 
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid:      GUID of the configuration table to be retrieved
+ * Return:     pointer to the configuration table or NULL
+ */
 void *get_efi_config_table(efi_guid_t guid)
 {
        unsigned long tables = efi_table_attr(efi_system_table, tables);
@@ -408,17 +444,18 @@ static const struct {
 };
 
 /**
- * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
  * @load_addr: pointer to store the address where the initrd was loaded
  * @load_size: pointer to store the size of the loaded initrd
  * @max:       upper limit for the initrd memory allocation
- * @return:    %EFI_SUCCESS if the initrd was loaded successfully, in which
- *             case @load_addr and @load_size are assigned accordingly
- *             %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd
- *             device path
- *             %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
- *             %EFI_OUT_OF_RESOURCES if memory allocation failed
- *             %EFI_LOAD_ERROR in all other cases
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ *   case @load_addr and @load_size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
  */
 static
 efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
@@ -481,6 +518,16 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
                                    load_addr, load_size);
 }
 
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image:     EFI loaded image protocol
+ * @load_addr: pointer to loaded initrd
+ * @load_size: size of loaded initrd
+ * @soft_limit:        preferred size of allocated memory for loading the initrd
+ * @hard_limit:        minimum size of allocated memory
+ *
+ * Return:     status code
+ */
 efi_status_t efi_load_initrd(efi_loaded_image_t *image,
                             unsigned long *load_addr,
                             unsigned long *load_size,
@@ -505,6 +552,15 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
        return status;
 }
 
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec:      number of microseconds to wait for key stroke
+ * @key:       key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return:     status code, EFI_SUCCESS if key received
+ */
 efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
 {
        efi_event_t events[2], timer;
index e97370b..3318ec3 100644 (file)
@@ -329,6 +329,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
        if (status != EFI_SUCCESS)
                goto fail_free_initrd;
 
+       if (IS_ENABLED(CONFIG_ARM))
+               efi_handle_post_ebs_state();
+
        efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
        /* not reached */
 
index bcd8c0a..2c9d422 100644 (file)
@@ -157,8 +157,14 @@ typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
 #define EFI_EVT_NOTIFY_WAIT    0x00000100U
 #define EFI_EVT_NOTIFY_SIGNAL  0x00000200U
 
-/*
- * boottime->wait_for_event takes an array of events as input.
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events:    array of UEFI events
+ * @ids:       index where to put the event in the array
+ * @event:     event to add to the array
+ *
+ * boottime->wait_for_event() takes an array of events as input.
  * Provide a helper to set it up correctly for mixed mode.
  */
 static inline
@@ -771,4 +777,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
                             unsigned long soft_limit,
                             unsigned long hard_limit);
 
+void efi_handle_post_ebs_state(void);
+
 #endif
index 2005e33..630caa6 100644 (file)
@@ -102,12 +102,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
        if (!found)
                return 0;
 
+       /* Skip any leading slashes */
+       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+               i++;
+
        while (--result_len > 0 && i < cmdline_len) {
-               if (cmdline[i] == L'\0' ||
-                   cmdline[i] == L'\n' ||
-                   cmdline[i] == L' ')
+               efi_char16_t c = cmdline[i++];
+
+               if (c == L'\0' || c == L'\n' || c == L' ')
                        break;
-               *result++ = cmdline[i++];
+               else if (c == L'/')
+                       /* Replace UNIX dir separators with EFI standard ones */
+                       *result++ = L'\\';
+               else
+                       *result++ = c;
        }
        *result = L'\0';
        return i;
index a700b3c..159fb4e 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/ctype.h>
+#include <linux/string.h>
 #include <linux/types.h>
 
 char *skip_spaces(const char *str)
index 873841a..3d6ba42 100644 (file)
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 
        cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
                             GFP_KERNEL);
-       if (!cpu_groups)
+       if (!cpu_groups) {
+               free_cpumask_var(tmp);
                return -ENOMEM;
+       }
 
        cpumask_copy(tmp, cpu_online_mask);
 
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
                        topology_core_cpumask(cpumask_any(tmp));
 
                if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+                       free_cpumask_var(tmp);
                        free_cpu_groups(num_groups, &cpu_groups);
                        return -ENOMEM;
                }
@@ -196,13 +199,12 @@ static int hotplug_tests(void)
        if (!page_buf)
                goto out_free_cpu_groups;
 
-       err = 0;
        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
         * refuse doing that.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
-       err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+       err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
 
        /*
         * Take down CPUs by cpu group this time. When the last CPU is turned
index ef80988..625c8fd 100644 (file)
@@ -181,6 +181,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property);
 static void
 rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
 {
+       time64_t date_and_time;
        u32 packet;
        int ret = rpi_firmware_property(fw,
                                        RPI_FIRMWARE_GET_FIRMWARE_REVISION,
@@ -189,7 +190,9 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
        if (ret)
                return;
 
-       dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
+       /* This is not compatible with y2038 */
+       date_and_time = packet;
+       dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
 }
 
 static void
index b2408a7..7cd5a29 100644 (file)
@@ -208,7 +208,7 @@ config FPGA_DFL_PCI
 
 config FPGA_MGR_ZYNQMP_FPGA
        tristate "Xilinx ZynqMP FPGA"
-       depends on ARCH_ZYNQMP || COMPILE_TEST
+       depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
        help
          FPGA manager driver support for Xilinx ZynqMP FPGAs.
          This driver uses the processor configuration port(PCAP)
index 5640efe..5bda38e 100644 (file)
@@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
@@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to drop cache: %d\n",
                                ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
                ret = regmap_read(arizona->regmap, reg, &val);
-               if (ret < 0)
+               if (ret < 0) {
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
+               }
 
                pm_runtime_mark_last_busy(chip->parent);
                pm_runtime_put_autosuspend(chip->parent);
@@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put(chip->parent);
                        return ret;
                }
        }
index 1fca8dd..a3b9bde 100644 (file)
@@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+
+#include <linux/dmi.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+       {
+               /*
+                * On Intel Galileo Gen 2 board the IRQ pin of one of
+                * the I²C GPIO expanders, which has GpioInt() resource,
+                * is provided as an absolute number instead of being
+                * relative. Since first controller (gpio-sch.c) and
+                * second (gpio-dwapb.c) are at the fixed bases, we may
+                * safely refer to the number in the global space to get
+                * an IRQ out of it.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+               },
+       },
+       {}
+};
+
+#ifdef CONFIG_ACPI
+static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_gpio *agpio;
+       int *pin = data;
+
+       if (acpi_gpio_get_irq_resource(ares, &agpio))
+               *pin = agpio->pin_table[0];
+       return 1;
+}
+
+static int pca953x_acpi_find_pin(struct device *dev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+       int pin = -ENOENT, ret;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+       acpi_dev_free_resource_list(&r);
+       if (ret < 0)
+               return ret;
+
+       return pin;
+}
+#else
+static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+#endif
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+       int pin, ret;
+
+       pin = pca953x_acpi_find_pin(dev);
+       if (pin < 0)
+               return pin;
+
+       dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+
+       if (!gpio_is_valid(pin))
+               return -EINVAL;
+
+       ret = gpio_request(pin, "pca953x interrupt");
+       if (ret)
+               return ret;
+
+       ret = gpio_to_irq(pin);
+
+       /* When pin is used as an IRQ, no need to keep it requested */
+       gpio_free(pin);
+
+       return ret;
+}
+#endif
+
 static const struct acpi_device_id pca953x_acpi_ids[] = {
        { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
        { }
@@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = {
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
 
+       .disable_locking = true,
        .cache_type = REGCACHE_RBTREE,
        .max_register = 0x7f,
 };
@@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
        DECLARE_BITMAP(reg_direction, MAX_LINE);
        int level;
 
-       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
-
        if (chip->driver_data & PCA_PCAL) {
                /* Enable latch on interrupt-enabled inputs */
                pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
@@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
                pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
        }
 
+       /* Switch direction to input if needed */
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
        bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+       bitmap_complement(reg_direction, reg_direction, gc->ngpio);
        bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
 
        /* Look for any newly setup interrupt */
@@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
        struct gpio_chip *gc = &chip->gpio_chip;
        DECLARE_BITMAP(pending, MAX_LINE);
        int level;
+       bool ret;
 
-       if (!pca953x_irq_pending(chip, pending))
-               return IRQ_NONE;
+       mutex_lock(&chip->i2c_lock);
+       ret = pca953x_irq_pending(chip, pending);
+       mutex_unlock(&chip->i2c_lock);
 
        for_each_set_bit(level, pending, gc->ngpio)
                handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
 
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(ret);
 }
 
 static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
@@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
        DECLARE_BITMAP(irq_stat, MAX_LINE);
        int ret;
 
+       if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
+               ret = pca953x_acpi_get_irq(&client->dev);
+               if (ret > 0)
+                       client->irq = ret;
+       }
+
        if (!client->irq)
                return 0;
 
index c687432..29f767e 100644 (file)
@@ -2036,3 +2036,20 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
        return 0;
 }
 
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+                                  uint32_t table,
+                                  uint16_t *size,
+                                  uint8_t *frev,
+                                  uint8_t *crev,
+                                  uint8_t **addr)
+{
+       uint16_t data_start;
+
+       if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+                                          size, frev, crev, &data_start))
+               return -EINVAL;
+
+       *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+       return 0;
+}
index fd8f180..1321ec0 100644 (file)
@@ -216,6 +216,13 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
                              u8 voltage_type,
                              u8 *svd_gpio_id, u8 *svc_gpio_id);
 
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+                                  uint32_t table,
+                                  uint16_t *size,
+                                  uint8_t *frev,
+                                  uint8_t *crev,
+                                  uint8_t **addr);
+
 void amdgpu_atombios_fini(struct amdgpu_device *adev);
 int amdgpu_atombios_init(struct amdgpu_device *adev);
 
index a3fa156..193ffdb 100644 (file)
@@ -1073,6 +1073,57 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 }
 
 
+/**
+ * amdgpu_debugfs_gfxoff_read - read gfxoff status
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
+                                        size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       ssize_t result = 0;
+       int r;
+
+       if (size & 0x3 || *pos & 0x3)
+               return -EINVAL;
+
+       r = pm_runtime_get_sync(adev->ddev->dev);
+       if (r < 0)
+               return r;
+
+       while (size) {
+               uint32_t value;
+
+               r = amdgpu_get_gfx_off_status(adev, &value);
+               if (r) {
+                       pm_runtime_mark_last_busy(adev->ddev->dev);
+                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       return r;
+               }
+
+               r = put_user(value, (uint32_t *)buf);
+               if (r) {
+                       pm_runtime_mark_last_busy(adev->ddev->dev);
+                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       return r;
+               }
+
+               result += 4;
+               buf += 4;
+               *pos += 4;
+               size -= 4;
+       }
+
+       pm_runtime_mark_last_busy(adev->ddev->dev);
+       pm_runtime_put_autosuspend(adev->ddev->dev);
+
+       return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
@@ -1123,7 +1174,9 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
 
 static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
        .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_gfxoff_read,
        .write = amdgpu_debugfs_gfxoff_write,
+       .llseek = default_llseek
 };
 
 static const struct file_operations *debugfs_regs[] = {
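
The new gfxoff read handler follows the common debugfs shape: reject unaligned requests, loop copying 32-bit words to user space with put_user(), and advance *pos; the fops gain .read and .llseek = default_llseek. A stripped-down sketch of the same shape (the status value here is a placeholder, not the real query):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static ssize_t demo_debugfs_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *pos)
{
        ssize_t result = 0;

        /* only whole, aligned 32-bit words */
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                u32 value = 0xdeadbeef; /* placeholder for the status query */

                if (put_user(value, (u32 __user *)buf))
                        return -EFAULT;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations demo_debugfs_fops = {
        .owner = THIS_MODULE,
        .read = demo_debugfs_read,
        .llseek = default_llseek,
};
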
index 7f3cd71..aa27fe6 100644 (file)
@@ -425,6 +425,7 @@ struct amdgpu_pm {
        u32                     default_sclk;
        u32                     default_mclk;
        struct amdgpu_i2c_chan *i2c_bus;
+       bool                    bus_locked;
        /* internal thermal controller on rv6xx+ */
        enum amdgpu_int_thermal_type int_thermal_type;
        struct device           *int_hwmon_dev;
index 2eacf1f..26127c7 100644 (file)
@@ -1186,7 +1186,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
         * unfortunately we can't detect certain
         * hypervisors so just do this all the time.
         */
-       adev->mp1_state = PP_MP1_STATE_UNLOAD;
+       if (!amdgpu_passthrough(adev))
+               adev->mp1_state = PP_MP1_STATE_UNLOAD;
        amdgpu_device_ip_suspend(adev);
        adev->mp1_state = PP_MP1_STATE_NONE;
 }
index d612033..78d37f9 100644 (file)
@@ -578,6 +578,20 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
 
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
+{
+
+       int r = 0;
+
+       mutex_lock(&adev->gfx.gfx_off_mutex);
+
+       r = smu_get_status_gfxoff(adev, value);
+
+       mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+       return r;
+}
+
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
index 50be8e3..1e7a2b0 100644 (file)
@@ -378,6 +378,7 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
                                    int pipe, int queue);
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
index 20f39aa..5f20cad 100644 (file)
@@ -796,8 +796,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                tmp_str++;
        while (isspace(*++tmp_str));
 
-       while (tmp_str[0]) {
-               sub_str = strsep(&tmp_str, delimiter);
+       while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
@@ -1067,8 +1066,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
-       while (tmp[0]) {
-               sub_str = strsep(&tmp, delimiter);
+       while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
                if (strlen(sub_str)) {
                        ret = kstrtol(sub_str, 0, &level);
                        if (ret)
@@ -1697,8 +1695,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                        i++;
                memcpy(buf_cpy, buf, count-i);
                tmp_str = buf_cpy;
-               while (tmp_str[0]) {
-                       sub_str = strsep(&tmp_str, delimiter);
+               while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                        ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                        if (ret)
                                return -EINVAL;
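
The three amdgpu_pm hunks rewrite the tokenising loops in the canonical strsep() form, which terminates cleanly when the buffer is exhausted. A hedged sketch of that loop over a writable buffer (helper name is illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Parse a space-separated list of integers from a writable buffer. */
static int demo_parse_levels(char *buf, long *out, int max)
{
        char *sub_str, *tmp = buf;
        int n = 0, ret;

        while ((sub_str = strsep(&tmp, " ")) != NULL) {
                if (!strlen(sub_str))
                        continue;       /* skip empty tokens from repeated spaces */

                ret = kstrtol(sub_str, 0, &out[n]);
                if (ret)
                        return -EINVAL;

                if (++n == max)
                        break;
        }

        return n;
}
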
index aa80cf7..fe7d39b 100644 (file)
@@ -500,7 +500,6 @@ static int psp_asd_load(struct psp_context *psp)
         * TODO: add version check to make it common
         */
        if (amdgpu_sriov_vf(psp->adev) ||
-           (psp->adev->asic_type == CHIP_SIENNA_CICHLID) ||
            (psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
                return 0;
 
@@ -1988,7 +1987,7 @@ static int psp_suspend(void *handle)
 
        ret = psp_tmr_terminate(psp);
        if (ret) {
-               DRM_ERROR("Failed to terminate tmr\n");
+               DRM_ERROR("Failed to terminate tmr\n");
                return ret;
        }
 
index 7e8647a..9e7d640 100644 (file)
@@ -47,7 +47,6 @@ struct amdgpu_ras_eeprom_control {
        uint32_t next_addr;
        unsigned int num_recs;
        struct mutex tbl_mutex;
-       bool bus_locked;
        uint32_t tbl_byte_sum;
        uint16_t i2c_address; // 8-bit represented address
 };
index fcff567..e11c5d6 100644 (file)
@@ -1292,7 +1292,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
  * ttm_tt_destroy().
  */
-static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1303,14 +1303,13 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
                amdgpu_ttm_tt_unpin_userptr(ttm);
 
        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
-               return 0;
+               return;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        if (r)
                DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
                          gtt->ttm.ttm.num_pages, gtt->offset);
-       return r;
 }
 
 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
index a777d58..495c3d7 100644 (file)
@@ -60,7 +60,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-       unsigned long bo_size, fw_shared_bo_size;
+       unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
@@ -176,6 +176,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+       bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
 
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                if (adev->vcn.harvest_config & (1 << i))
@@ -189,6 +190,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                        return r;
                }
 
+               adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
+                               bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+               adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
+                               bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+
                if (adev->vcn.indirect_sram) {
                        r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
@@ -198,17 +204,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                                return r;
                        }
                }
-
-               r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
-                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
-                               &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
-               if (r) {
-                       dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
-                       return r;
-               }
-
-               fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
-               adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
        }
 
        return 0;
@@ -224,11 +219,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
 
-               kvfree(adev->vcn.inst[j].saved_shm_bo);
-               amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
-                                         &adev->vcn.inst[j].fw_shared_gpu_addr,
-                                         (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
-
                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -274,17 +264,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
                        return -ENOMEM;
 
                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
-
-               if (adev->vcn.inst[i].fw_shared_bo == NULL)
-                       return 0;
-
-               if (!adev->vcn.inst[i].saved_shm_bo)
-                       return -ENOMEM;
-
-               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
-               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
-
-               memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
        }
        return 0;
 }
@@ -322,17 +301,6 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
                        }
                        memset_io(ptr, 0, size);
                }
-
-               if (adev->vcn.inst[i].fw_shared_bo == NULL)
-                       return -EINVAL;
-
-               size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
-               ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
-
-               if (adev->vcn.inst[i].saved_shm_bo != NULL)
-                       memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
-               else
-                       memset_io(ptr, 0, size);
        }
        return 0;
 }
index e125e8b..7a9b804 100644 (file)
@@ -199,7 +199,6 @@ struct amdgpu_vcn_inst {
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
-       struct amdgpu_bo        *fw_shared_bo;
        struct dpg_pause_state  pause_state;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
@@ -207,7 +206,6 @@ struct amdgpu_vcn_inst {
        atomic_t                dpg_enc_submission_cnt;
        void                    *fw_shared_cpu_addr;
        uint64_t                fw_shared_gpu_addr;
-       void                    *saved_shm_bo;
 };
 
 struct amdgpu_vcn {
index fdabaf0..350f1bf 100644 (file)
 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
 /**
+ * force_update_wptr_for_self_int - Force update the wptr for self interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @threshold: threshold to trigger the wptr reporting
+ * @timeout: timeout to trigger the wptr reporting
+ * @enabled: Enable/disable timeout flush mechanism
+ *
+ * threshold input range: 0 ~ 15, default 0,
+ * real_threshold = 2^threshold
+ * timeout input range: 0 ~ 20, default 8,
+ * real_timeout = (2^timeout) * 1024 / (socclk_freq)
+ *
+ * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
+ */
+static void
+force_update_wptr_for_self_int(struct amdgpu_device *adev,
+                              u32 threshold, u32 timeout, bool enabled)
+{
+       u32 ih_cntl, ih_rb_cntl;
+
+       if (adev->asic_type < CHIP_SIENNA_CICHLID)
+               return;
+
+       ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
+       ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+                               SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
+       ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+                               SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+                                  RB_USED_INT_THRESHOLD, threshold);
+
+       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+       ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+       ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+                                  RB_USED_INT_THRESHOLD, threshold);
+       WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+       WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
+}
+
+/**
  * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
@@ -371,6 +413,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 
        /* enable interrupts */
        navi10_ih_enable_interrupts(adev);
+       /* enable wptr force update for self int */
+       force_update_wptr_for_self_int(adev, 0, 8, true);
 
        return 0;
 }
@@ -384,6 +428,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
  */
 static void navi10_ih_irq_disable(struct amdgpu_device *adev)
 {
+       force_update_wptr_for_self_int(adev, 0, 8, false);
        navi10_ih_disable_interrupts(adev);
 
        /* Wait and acknowledge irq */
index 479991b..ea69ae7 100644 (file)
@@ -446,6 +446,9 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
+       if (adev->asic_type == CHIP_SIENNA_CICHLID)
+               adev->gmc.xgmi.supported = true;
+
        /* Set IP register base before any HW register access */
        r = nv_reg_base_init(adev);
        if (r)
index 77f9981..d488d25 100644 (file)
@@ -56,7 +56,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_asd.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
 
@@ -179,6 +179,10 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                }
                break;
        case CHIP_SIENNA_CICHLID:
+               err = psp_init_ta_microcode(&adev->psp, chip_name);
+               if (err)
+                       return err;
+               break;
        case CHIP_NAVY_FLOUNDER:
                break;
        default:
index 9bffbab..d55bf64 100644 (file)
@@ -32,7 +32,6 @@
 #include "amdgpu_amdkfd.h"
 #include <linux/i2c.h>
 #include <linux/pci.h>
-#include "amdgpu_ras.h"
 
 /* error codes */
 #define I2C_OK                0
@@ -537,12 +536,12 @@ Fail:
        return false;
 }
 
-/***************************** EEPROM I2C GLUE ****************************/
+/***************************** I2C GLUE ****************************/
 
-static uint32_t smu_v11_0_i2c_eeprom_read_data(struct i2c_adapter *control,
-                                              uint8_t address,
-                                              uint8_t *data,
-                                              uint32_t numbytes)
+static uint32_t smu_v11_0_i2c_read_data(struct i2c_adapter *control,
+                                       uint8_t address,
+                                       uint8_t *data,
+                                       uint32_t numbytes)
 {
        uint32_t  ret = 0;
 
@@ -562,10 +561,10 @@ Fail:
        return ret;
 }
 
-static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
-                                               uint8_t address,
-                                               uint8_t *data,
-                                               uint32_t numbytes)
+static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
+                                        uint8_t address,
+                                        uint8_t *data,
+                                        uint32_t numbytes)
 {
        uint32_t  ret;
 
@@ -592,14 +591,13 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
 static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
        struct amdgpu_device *adev = to_amdgpu_device(i2c);
-       struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
        if (!smu_v11_0_i2c_bus_lock(i2c)) {
                DRM_ERROR("Failed to lock the bus from SMU");
                return;
        }
 
-       control->bus_locked = true;
+       adev->pm.bus_locked = true;
 }
 
 static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
@@ -611,14 +609,13 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
 static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
        struct amdgpu_device *adev = to_amdgpu_device(i2c);
-       struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
        if (!smu_v11_0_i2c_bus_unlock(i2c)) {
                DRM_ERROR("Failed to unlock the bus from SMU");
                return;
        }
 
-       control->bus_locked = false;
+       adev->pm.bus_locked = false;
 }
 
 static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
@@ -627,14 +624,13 @@ static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
        .unlock_bus = unlock_bus,
 };
 
-static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int smu_v11_0_i2c_xfer(struct i2c_adapter *i2c_adap,
                              struct i2c_msg *msgs, int num)
 {
        int i, ret;
        struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
-       struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
-       if (!control->bus_locked) {
+       if (!adev->pm.bus_locked) {
                DRM_ERROR("I2C bus unlocked, stopping transaction!");
                return -EIO;
        }
@@ -643,13 +639,13 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
 
        for (i = 0; i < num; i++) {
                if (msgs[i].flags & I2C_M_RD)
-                       ret = smu_v11_0_i2c_eeprom_read_data(i2c_adap,
-                                                       (uint8_t)msgs[i].addr,
-                                                       msgs[i].buf, msgs[i].len);
+                       ret = smu_v11_0_i2c_read_data(i2c_adap,
+                                                     (uint8_t)msgs[i].addr,
+                                                     msgs[i].buf, msgs[i].len);
                else
-                       ret = smu_v11_0_i2c_eeprom_write_data(i2c_adap,
-                                                        (uint8_t)msgs[i].addr,
-                                                        msgs[i].buf, msgs[i].len);
+                       ret = smu_v11_0_i2c_write_data(i2c_adap,
+                                                      (uint8_t)msgs[i].addr,
+                                                      msgs[i].buf, msgs[i].len);
 
                if (ret != I2C_OK) {
                        num = -EIO;
@@ -661,18 +657,18 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
        return num;
 }
 
-static u32 smu_v11_0_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+static u32 smu_v11_0_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 
-static const struct i2c_algorithm smu_v11_0_i2c_eeprom_i2c_algo = {
-       .master_xfer = smu_v11_0_i2c_eeprom_i2c_xfer,
-       .functionality = smu_v11_0_i2c_eeprom_i2c_func,
+static const struct i2c_algorithm smu_v11_0_i2c_algo = {
+       .master_xfer = smu_v11_0_i2c_xfer,
+       .functionality = smu_v11_0_i2c_func,
 };
 
-int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
+int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
        int res;
@@ -680,8 +676,8 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
-       control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
+       control->algo = &smu_v11_0_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
        control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
 
        res = i2c_add_adapter(control);
@@ -691,7 +687,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
        return res;
 }
 
-void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control)
+void smu_v11_0_i2c_control_fini(struct i2c_adapter *control)
 {
        i2c_del_adapter(control);
 }
@@ -719,9 +715,9 @@ bool smu_v11_0_i2c_test_bus(struct i2c_adapter *control)
        smu_v11_0_i2c_init(control);
 
        /* Write 0xde to address 0x0000 on the EEPROM */
-       ret = smu_v11_0_i2c_eeprom_write_data(control, I2C_TARGET_ADDR, data, 6);
+       ret = smu_v11_0_i2c_write_data(control, I2C_TARGET_ADDR, data, 6);
 
-       ret = smu_v11_0_i2c_eeprom_read_data(control, I2C_TARGET_ADDR, data, 6);
+       ret = smu_v11_0_i2c_read_data(control, I2C_TARGET_ADDR, data, 6);
 
        smu_v11_0_i2c_fini(control);
 
index 973f28d..44467c0 100644 (file)
@@ -28,7 +28,7 @@
 
 struct i2c_adapter;
 
-int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control);
-void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control);
+int smu_v11_0_i2c_control_init(struct i2c_adapter *control);
+void smu_v11_0_i2c_control_fini(struct i2c_adapter *control);
 
 #endif
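
The renamed control_init()/control_fini() pair still registers a standard I2C adapter, so callers reach smu_v11_0_i2c_xfer() through the regular I2C core. Below is a minimal sketch of a combined write-then-read through that adapter; it assumes an already-registered adapter, and the target address, register byte and message layout are illustrative, not the actual RAS EEPROM protocol.

/* Hedged sketch: a write-then-read transfer routed through the renamed
 * SMU I2C adapter via the regular I2C core.  "addr" and "reg" are
 * illustrative assumptions.
 */
#include <linux/i2c.h>

static int example_smu_i2c_read_byte(struct i2c_adapter *adap, u16 addr,
				      u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	int ret = i2c_transfer(adap, msgs, 2);

	/* i2c_transfer() returns the number of messages completed */
	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}
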
index d822415..533913b 100644 (file)
@@ -1376,9 +1376,9 @@ static int dm_late_init(void *handle)
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
-       bool ret;
+       bool ret = true;
 
-       if (!adev->dm.fw_dmcu)
+       if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
                return detect_mst_link_for_all_connectors(adev->ddev);
 
        dmcu = adev->dm.dc->res_pool->dmcu;
@@ -1397,7 +1397,14 @@ static int dm_late_init(void *handle)
         */
        params.min_abm_backlight = 0x28F;
 
-       ret = dmcu_load_iram(dmcu, params);
+       /* In the case where ABM is implemented on dmcub,
+        * the dmcu object will be null.
+        * ABM 2.4 and up are implemented on dmcub.
+        */
+       if (dmcu)
+               ret = dmcu_load_iram(dmcu, params);
+       else if (adev->dm.dc->ctx->dmub_srv)
+               ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
 
        if (!ret)
                return -EINVAL;
@@ -1486,23 +1493,12 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
                return 0;
        }
 
-       mutex_lock(&smu->mutex);
-
-       /* pass data to smu controller */
-       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-                       !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
-
-               if (ret) {
-                       mutex_unlock(&smu->mutex);
-                       DRM_ERROR("Failed to update WMTABLE!\n");
-                       return ret;
-               }
-               smu->watermarks_bitmap |= WATERMARKS_LOADED;
+       ret = smu_write_watermarks_table(smu);
+       if (ret) {
+               DRM_ERROR("Failed to update WMTABLE!\n");
+               return ret;
        }
 
-       mutex_unlock(&smu->mutex);
-
        return 0;
 }
 
@@ -4546,7 +4542,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 #if defined(CONFIG_DRM_AMD_DC_DCN)
                dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
-                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
                                      &dsc_caps);
 #endif
                link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -6235,7 +6231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
-           dc_is_dmcu_initialized(adev->dm.dc)) {
+           (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
                drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.abm_level_property, 0);
        }
@@ -8471,7 +8467,7 @@ cleanup:
        *out_type = update_type;
        return ret;
 }
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
 {
        struct drm_connector *connector;
@@ -8494,6 +8490,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
 
        return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
 }
+#endif
 
 /**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -8547,6 +8544,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        if (ret)
                goto fail;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->asic_type >= CHIP_NAVI10) {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -8556,7 +8554,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        }
                }
        }
-
+#endif
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
                    !new_crtc_state->color_mgmt_changed &&
index caf3bea..998f729 100644 (file)
@@ -33,6 +33,8 @@
 #include "amdgpu_dm_debugfs.h"
 #include "dm_helpers.h"
 #include "dmub/dmub_srv.h"
+#include "resource.h"
+#include "dsc.h"
 
 struct dmub_debugfs_trace_header {
        uint32_t entry_count;
@@ -817,24 +819,6 @@ unlock:
        return res;
 }
 
-/*
- * Returns the min and max vrr vfreq through the connector's debugfs file.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
- */
-static int vrr_range_show(struct seq_file *m, void *data)
-{
-       struct drm_connector *connector = m->private;
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
-       if (connector->status != connector_status_connected)
-               return -ENODEV;
-
-       seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
-       seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
-
-       return 0;
-}
-
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 /*
  * Returns the HDCP capability of the Display (1.4 for now).
@@ -995,14 +979,517 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
        return read_size - r;
 }
 
+static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_clock_en);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_slice_width);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_slice_height);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_bytes_per_pixel);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_pic_width);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_pic_height);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_chunk_size);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+       struct display_stream_compressor *dsc;
+       struct dcn_dsc_state dsc_state = {0};
+       const uint32_t rd_buf_size = 100;
+       struct pipe_ctx *pipe_ctx;
+       ssize_t result = 0;
+       int i, r, str_len = 30;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream &&
+                   pipe_ctx->stream->link == aconnector->dc_link)
+                       break;
+       }
+
+       if (!pipe_ctx) {
+               kfree(rd_buf);
+               return -ENXIO;
+       }
+
+       dsc = pipe_ctx->stream_res.dsc;
+       if (dsc)
+               dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+       snprintf(rd_buf_ptr, str_len,
+               "%d\n",
+               dsc_state.dsc_slice_bpg_offset);
+       rd_buf_ptr += str_len;
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
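
Each of the eight DSC debugfs readers above open-codes the same byte-by-byte copy-out loop. For comparison only, the same read semantics can be expressed with the kernel's simple_read_from_buffer() helper; below is a hedged sketch for one reader, assuming the surrounding amdgpu_dm_debugfs.c context, with the pipe/DSC lookup elided.

/* Hedged sketch only: same read behaviour via simple_read_from_buffer().
 * Assumes the pipe_ctx/dsc lookup from the patch has already run.
 */
static ssize_t dp_dsc_clock_en_read_alt(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct dcn_dsc_state dsc_state = {0};
	char rd_buf[32];
	int len;

	/* ... locate pipe_ctx and call dsc->funcs->dsc_read_state() here ... */

	len = scnprintf(rd_buf, sizeof(rd_buf), "%d\n", dsc_state.dsc_clock_en);
	return simple_read_from_buffer(buf, size, pos, rd_buf, len);
}
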
 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
-DEFINE_SHOW_ATTRIBUTE(vrr_range);
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
 #endif
 
+static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_clock_en_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_slice_width_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_slice_height_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_bytes_per_pixel_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_pic_width_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_pic_height_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_chunk_size_read,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_dsc_slice_bpg_offset_read,
+       .llseek = default_llseek
+};
+
 static const struct file_operations dp_link_settings_debugfs_fops = {
        .owner = THIS_MODULE,
        .read = dp_link_settings_read,
@@ -1055,14 +1542,21 @@ static const struct {
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
                {"test_pattern", &dp_phy_test_pattern_fops},
-               {"vrr_range", &vrr_range_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                {"hdcp_sink_capability", &hdcp_sink_capability_fops},
 #endif
                {"sdp_message", &sdp_message_fops},
                {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
                {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
-               {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
+               {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
+               {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
+               {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
+               {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
+               {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
+               {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
+               {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
+               {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
+               {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}
 };
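
With the table above, every DP connector exposes the new per-stream DSC state under its debugfs directory. A small user-space sketch that dumps the new entries follows; the debugfs mount point, DRI minor 0 and connector name DP-1 are assumptions for illustration.

/* Hedged user-space sketch: dump the new DSC debugfs entries for DP-1. */
#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"dsc_clock_en", "dsc_slice_width", "dsc_slice_height",
		"dsc_bytes_per_pixel", "dsc_pic_width", "dsc_pic_height",
		"dsc_chunk_size", "dsc_slice_bpg",
	};
	char path[128], line[64];

	for (unsigned i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/dri/0/DP-1/%s", files[i]);
		FILE *fp = fopen(path, "r");
		if (!fp)
			continue;
		if (fgets(line, sizeof(line), fp))
			printf("%s: %s", files[i], line);
		fclose(fp);
	}
	return 0;
}
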
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
index 0affd19..e85b58f 100644 (file)
@@ -806,7 +806,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                stream = dc_state->streams[i];
 
                if (stream->timing.flags.DSC == 1)
-                       dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+                       dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
        }
 
        return true;
index 1f94591..5cb7b83 100644 (file)
@@ -1104,10 +1104,6 @@ static inline enum link_training_result perform_link_training_int(
        dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
        dpcd_set_training_pattern(link, dpcd_pattern);
 
-       /* delay 5ms after notifying sink of idle pattern before switching output */
-       if (link->connector_signal != SIGNAL_TYPE_EDP)
-               msleep(5);
-
        /* 4. mainlink output idle pattern*/
        dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
 
@@ -3523,8 +3519,8 @@ static bool retrieve_link_cap(struct dc_link *link)
                status = core_link_read_dpcd(
                                link,
                                DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
-                               link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
-                               sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
+                               link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+                               sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
        }
 
        if (!dpcd_read_sink_ext_caps(link))
index d6989d1..10d69ad 100644 (file)
@@ -244,6 +244,25 @@ struct dc_stream_status *dc_stream_get_status(
        return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
+#ifndef TRIM_FSFT
+/**
+ * dc_optimize_timing() - optimize the timing for a given max input rate
+ */
+bool dc_optimize_timing(
+       struct dc_crtc_timing *timing,
+       unsigned int max_input_rate_in_khz)
+{
+       //optimization is expected to assign a value to these:
+       //timing->pix_clk_100hz
+       //timing->v_front_porch
+       //timing->v_total
+       //timing->fast_transport_output_rate_100hz;
+       timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+
+       return true;
+}
+#endif
+
 
 /**
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -655,6 +674,17 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
        return true;
 }
 
+enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       if (dc->res_pool->funcs->add_dsc_to_stream_resource) {
+               return dc->res_pool->funcs->add_dsc_to_stream_resource(dc, state, stream);
+       } else {
+               return DC_NO_DSC_RESOURCE;
+       }
+}
+
 void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
 {
        DC_LOG_DC(
index e5a1a9e..f50ef42 100644 (file)
@@ -42,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.94"
+#define DC_VER "3.2.95"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -96,6 +96,9 @@ struct dc_plane_cap {
                uint32_t nv12;
                uint32_t fp16;
        } max_downscale_factor;
+       // minimal width/height
+       uint32_t min_width;
+       uint32_t min_height;
 };
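
The resource pool tables later in this merge fill the two new fields positionally (64, 64 for the DCE and DCN21 pools, 16, 16 for DCN20). Below is a hedged equivalent using designated initializers, shown only to make the meaning of those trailing numbers explicit.

/* Hedged sketch: the trailing "64, 64" in the plane_cap tables is
 * equivalent to naming the new members explicitly.
 */
static const struct dc_plane_cap example_plane_cap = {
	/* ...pixel format and scaling caps as in the existing tables... */
	.min_width  = 64,
	.min_height = 64,
};
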
 
 // Color management caps (DPP and MPC)
index af177c0..a8a3b06 100644 (file)
@@ -726,7 +726,7 @@ union dpcd_dsc_basic_capabilities {
        uint8_t raw[16];
 };
 
-union dpcd_dsc_ext_capabilities {
+union dpcd_dsc_branch_decoder_capabilities {
        struct {
                uint8_t BRANCH_OVERALL_THROUGHPUT_0;
                uint8_t BRANCH_OVERALL_THROUGHPUT_1;
@@ -737,7 +737,7 @@ union dpcd_dsc_ext_capabilities {
 
 struct dpcd_dsc_capabilities {
        union dpcd_dsc_basic_capabilities dsc_basic_caps;
-       union dpcd_dsc_ext_capabilities dsc_ext_caps;
+       union dpcd_dsc_branch_decoder_capabilities dsc_branch_decoder_caps;
 };
 
 /* These parameters are from PSR capabilities reported by Sink DPCD */
index b7a8c71..1a87bc3 100644 (file)
@@ -713,6 +713,9 @@ struct dc_crtc_timing_flags {
        uint32_t LTE_340MCSC_SCRAMBLE:1;
 
        uint32_t DSC : 1; /* Use DSC with this timing */
+#ifndef TRIM_FSFT
+       uint32_t FAST_TRANSPORT: 1;
+#endif
 };
 
 enum dc_timing_3d_format {
@@ -772,6 +775,10 @@ struct dc_crtc_timing {
        enum dc_aspect_ratio aspect_ratio;
        enum scanning_type scan_type;
 
+#ifndef TRIM_FSFT
+       uint32_t fast_transport_output_rate_100hz;
+#endif
+
        struct dc_crtc_timing_flags flags;
        struct dc_dsc_config dsc_cfg;
 };
index f2ed9bc..e4e85a1 100644 (file)
@@ -363,6 +363,10 @@ bool dc_stream_remove_writeback(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t dwb_pipe_inst);
 
+enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *stream);
+
 bool dc_stream_warmup_writeback(struct dc *dc,
                int num_dwb,
                struct dc_writeback_info *wb_info);
@@ -419,6 +423,12 @@ struct dc_stream_status *dc_stream_get_status_from_state(
 struct dc_stream_status *dc_stream_get_status(
        struct dc_stream_state *dc_stream);
 
+#ifndef TRIM_FSFT
+bool dc_optimize_timing(
+       struct dc_crtc_timing *timing,
+       unsigned int max_input_rate_in_khz);
+#endif
+
 /*******************************************************************************
  * Cursor interfaces - To manages the cursor within a stream
  ******************************************************************************/
index 9597fc7..1d53850 100644 (file)
@@ -423,7 +423,9 @@ static const struct dc_plane_cap plane_cap = {
                                .argb8888 = 250,
                                .nv12 = 1,
                                .fp16 = 1
-               }
+               },
+               64,
+               64
 };
 
 static const struct dc_plane_cap underlay_plane_cap = {
@@ -447,7 +449,9 @@ static const struct dc_plane_cap underlay_plane_cap = {
                                .argb8888 = 1,
                                .nv12 = 250,
                                .fp16 = 1
-               }
+               },
+               64,
+               64
 };
 
 #define CTX  ctx
index 51b3fe5..5d83e81 100644 (file)
@@ -424,7 +424,9 @@ static const struct dc_plane_cap plane_cap = {
                        .argb8888 = 250,
                        .nv12 = 1,
                        .fp16 = 1
-       }
+       },
+       64,
+       64
 };
 
 #define CTX  ctx
index 3c6ecfe..ba50214 100644 (file)
@@ -157,6 +157,11 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
        REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
        REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
        REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
+       REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
+       REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
+       REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
+       REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
+       REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
 }
 
 
index e226647..968a89b 100644 (file)
@@ -1043,7 +1043,9 @@ static const struct dc_plane_cap plane_cap = {
                        .argb8888 = 250,
                        .nv12 = 250,
                        .fp16 = 1
-       }
+       },
+       16,
+       16
 };
 static const struct resource_caps res_cap_nv14 = {
                .num_timing_generator = 5,
@@ -3364,6 +3366,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
        .validate_bandwidth = dcn20_validate_bandwidth,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+       .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
        .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
        .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
        .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
index 99a7ef6..e3984f0 100644 (file)
@@ -152,11 +152,11 @@ static void enc2_stream_encoder_update_hdmi_info_packets(
 
        /*Always add mandatory packets first followed by optional ones*/
        enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
-       enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
+       enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
        enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
-       enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
-       enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
-       enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+       enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
+       enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
+       enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
 }
 
 static void enc2_stream_encoder_stop_hdmi_info_packets(
index 61b3372..88d41a3 100644 (file)
@@ -857,7 +857,9 @@ static const struct dc_plane_cap plane_cap = {
                        .argb8888 = 250,
                        .nv12 = 250,
                        .fp16 = 250
-       }
+       },
+       64,
+       64
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
@@ -1759,6 +1761,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
        .validate_bandwidth = dcn21_validate_bandwidth,
        .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
        .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+       .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
        .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
index 0c7f247..8cdaa6e 100644 (file)
@@ -747,7 +747,7 @@ done:
        return is_dsc_possible;
 }
 
-bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_branch_decoder_caps, struct dsc_dec_dpcd_caps *dsc_sink_caps)
 {
        if (!dpcd_dsc_basic_data)
                return false;
@@ -818,14 +818,14 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
        }
 
        /* Extended caps */
-       if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
+       if (dpcd_dsc_branch_decoder_caps == NULL) { // Branch decoder DPCD DSC data can be null, e.g. for a non-branch device
                dsc_sink_caps->branch_overall_throughput_0_mps = 0;
                dsc_sink_caps->branch_overall_throughput_1_mps = 0;
                dsc_sink_caps->branch_max_line_width = 0;
                return true;
        }
 
-       dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+       dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
        if (dsc_sink_caps->branch_overall_throughput_0_mps == 0)
                dsc_sink_caps->branch_overall_throughput_0_mps = 0;
        else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1)
@@ -835,7 +835,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
                dsc_sink_caps->branch_overall_throughput_0_mps += 600;
        }
 
-       dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+       dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
        if (dsc_sink_caps->branch_overall_throughput_1_mps == 0)
                dsc_sink_caps->branch_overall_throughput_1_mps = 0;
        else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1)
@@ -845,7 +845,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
                dsc_sink_caps->branch_overall_throughput_1_mps += 600;
        }
 
-       dsc_sink_caps->branch_max_line_width = dpcd_dsc_ext_data[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
+       dsc_sink_caps->branch_max_line_width = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
        ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120);
 
        return true;
index 5f985fc..329395e 100644 (file)
@@ -165,7 +165,9 @@ struct resource_funcs {
                        struct dc_3dlut **lut,
                        struct dc_transfer_func **shaper);
 #endif
-
+       enum dc_status (*add_dsc_to_stream_resource)(
+                       struct dc *dc, struct dc_state *state,
+                       struct dc_stream_state *stream);
 };
 
 struct audio_support{
index 7c2a332..5915994 100644 (file)
@@ -56,6 +56,11 @@ struct dcn_dsc_state {
        uint32_t dsc_clock_en;
        uint32_t dsc_slice_width;
        uint32_t dsc_bytes_per_pixel;
+       uint32_t dsc_slice_height;
+       uint32_t dsc_pic_width;
+       uint32_t dsc_pic_height;
+       uint32_t dsc_slice_bpg_offset;
+       uint32_t dsc_chunk_size;
 };
 
 
index 513a5f8..e013875 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xf675c6448
+#define DMUB_FW_VERSION_GIT_HASH 0xe6d590b09
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 24
+#define DMUB_FW_VERSION_REVISION 25
 #define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION)
 #endif
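
For reference, the packed ucode version is just the three components shifted into one 32-bit value; with major 0, minor 0 and revision 25 the macro evaluates to 0x19. A standalone check of that arithmetic:

#include <assert.h>

#define DMUB_FW_VERSION_MAJOR    0
#define DMUB_FW_VERSION_MINOR    0
#define DMUB_FW_VERSION_REVISION 25
#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | \
			       (DMUB_FW_VERSION_MINOR << 16) | \
			       DMUB_FW_VERSION_REVISION)

int main(void)
{
	/* (0 << 24) | (0 << 16) | 25 == 0x00000019 */
	assert(DMUB_FW_VERSION_UCODE == 0x19);
	return 0;
}
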
 
index d3a5ba9..7a2500f 100644 (file)
@@ -760,9 +760,35 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
 
        infopacket->valid = true;
 }
+#ifndef TRIM_FSFT
+static void build_vrr_infopacket_fast_transport_data(
+       bool ftActive,
+       unsigned int ftOutputRate,
+       struct dc_info_packet *infopacket)
+{
+       /* PB9 : bit7 - fast transport Active*/
+       unsigned char activeBit = (ftActive) ? 1 << 7 : 0;
+
+       infopacket->sb[1] &= ~(1 << 7);   /* clear the fast transport active bit */
+       infopacket->sb[1] |=  activeBit;  /* set it only when fast transport is active */
+
+       /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0  */
+       infopacket->sb[13] = ftOutputRate & 0xFF;
+
+       /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8  */
+       infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;
+
+       /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16  */
+       infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF;
+
+}
+#endif
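
The fast transport output rate is split across PB13..PB15 as three bytes, least significant byte first. A standalone round-trip check of that packing follows; the rate value is illustrative.

#include <assert.h>

int main(void)
{
	unsigned int ftOutputRate = 594000;	/* illustrative rate value */
	unsigned char sb[16] = {0};

	sb[13] = ftOutputRate & 0xFF;		/* PB13: bits 7:0   */
	sb[14] = (ftOutputRate >> 8) & 0xFF;	/* PB14: bits 15:8  */
	sb[15] = (ftOutputRate >> 16) & 0xFF;	/* PB15: bits 23:16 */

	unsigned int decoded = sb[13] | (sb[14] << 8) | (sb[15] << 16);
	assert(decoded == (ftOutputRate & 0xFFFFFF));
	return 0;
}
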
 
 static void build_vrr_infopacket_v3(enum signal_type signal,
                const struct mod_vrr_params *vrr,
+#ifndef TRIM_FSFT
+               bool ftActive, unsigned int ftOutputRate,
+#endif
                enum color_transfer_func app_tf,
                struct dc_info_packet *infopacket)
 {
@@ -773,6 +799,13 @@ static void build_vrr_infopacket_v3(enum signal_type signal,
 
        build_vrr_infopacket_fs2_data(app_tf, infopacket);
 
+#ifndef TRIM_FSFT
+       build_vrr_infopacket_fast_transport_data(
+                       ftActive,
+                       ftOutputRate,
+                       infopacket);
+#endif
+
        build_vrr_infopacket_checksum(&payload_size, infopacket);
 
        infopacket->valid = true;
@@ -795,7 +828,15 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
 
        switch (packet_type) {
        case PACKET_TYPE_FS_V3:
+#ifndef TRIM_FSFT
+               build_vrr_infopacket_v3(
+                               stream->signal, vrr,
+                               stream->timing.flags.FAST_TRANSPORT,
+                               stream->timing.fast_transport_output_rate_100hz,
+                               app_tf, infopacket);
+#else
                build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+#endif
                break;
        case PACKET_TYPE_FS_V2:
                build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
index d27a02a..e9c48f9 100644 (file)
@@ -35,7 +35,9 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
 
 include $(AMD_POWERPLAY)
 
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o sienna_cichlid_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \
+           smu_v12_0.o arcturus_ppt.o navi10_ppt.o \
+           renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o
 
 AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
 
index 03125c8..838a369 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define SWSMU_CODE_LAYER_L1
+
 #include <linux/firmware.h>
 #include <linux/pci.h>
 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "smu_v11_0.h"
-#include "smu_v12_0.h"
 #include "atom.h"
 #include "arcturus_ppt.h"
 #include "navi10_ppt.h"
 #undef pr_info
 #undef pr_debug
 
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(type)  #type
-static const char* __smu_message_names[] = {
-       SMU_MESSAGE_TYPES
-};
-
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
-{
-       if (type < 0 || type >= SMU_MSG_MAX_COUNT)
-               return "unknown smu message";
-       return __smu_message_names[type];
-}
-
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(fea)   #fea
-static const char* __smu_feature_names[] = {
-       SMU_FEATURE_MASKS
-};
-
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
-{
-       if (feature < 0 || feature >= SMU_FEATURE_COUNT)
-               return "unknown smu feature";
-       return __smu_feature_names[feature];
-}
-
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
        size_t size = 0;
-       int ret = 0, i = 0;
-       uint32_t feature_mask[2] = { 0 };
-       int32_t feature_index = 0;
-       uint32_t count = 0;
-       uint32_t sort_feature[SMU_FEATURE_COUNT];
-       uint64_t hw_feature_count = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
-       if (ret)
-               goto failed;
-
-       size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
-                       feature_mask[1], feature_mask[0]);
-
-       for (i = 0; i < SMU_FEATURE_COUNT; i++) {
-               feature_index = smu_feature_get_index(smu, i);
-               if (feature_index < 0)
-                       continue;
-               sort_feature[feature_index] = i;
-               hw_feature_count++;
-       }
+       size = smu_get_pp_feature_mask(smu, buf);
 
-       for (i = 0; i < hw_feature_count; i++) {
-               size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
-                              count++,
-                              smu_get_feature_name(smu, sort_feature[i]),
-                              i,
-                              !!smu_feature_is_enabled(smu, sort_feature[i]) ?
-                              "enabled" : "disabled");
-       }
-
-failed:
        mutex_unlock(&smu->mutex);
 
        return size;
 }
 
-static int smu_feature_update_enable_state(struct smu_context *smu,
-                                          uint64_t feature_mask,
-                                          bool enabled)
-{
-       struct smu_feature *feature = &smu->smu_feature;
-       int ret = 0;
-
-       if (enabled) {
-               ret = smu_send_smc_msg_with_param(smu,
-                                                 SMU_MSG_EnableSmuFeaturesLow,
-                                                 lower_32_bits(feature_mask),
-                                                 NULL);
-               if (ret)
-                       return ret;
-               ret = smu_send_smc_msg_with_param(smu,
-                                                 SMU_MSG_EnableSmuFeaturesHigh,
-                                                 upper_32_bits(feature_mask),
-                                                 NULL);
-               if (ret)
-                       return ret;
-       } else {
-               ret = smu_send_smc_msg_with_param(smu,
-                                                 SMU_MSG_DisableSmuFeaturesLow,
-                                                 lower_32_bits(feature_mask),
-                                                 NULL);
-               if (ret)
-                       return ret;
-               ret = smu_send_smc_msg_with_param(smu,
-                                                 SMU_MSG_DisableSmuFeaturesHigh,
-                                                 upper_32_bits(feature_mask),
-                                                 NULL);
-               if (ret)
-                       return ret;
-       }
-
-       mutex_lock(&feature->mutex);
-       if (enabled)
-               bitmap_or(feature->enabled, feature->enabled,
-                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-       else
-               bitmap_andnot(feature->enabled, feature->enabled,
-                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-       mutex_unlock(&feature->mutex);
-
-       return ret;
-}
-
 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
        int ret = 0;
-       uint32_t feature_mask[2] = { 0 };
-       uint64_t feature_2_enabled = 0;
-       uint64_t feature_2_disabled = 0;
-       uint64_t feature_enables = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
-       if (ret)
-               goto out;
-
-       feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
-
-       feature_2_enabled  = ~feature_enables & new_mask;
-       feature_2_disabled = feature_enables & ~new_mask;
+       ret = smu_set_pp_feature_mask(smu, new_mask);
 
-       if (feature_2_enabled) {
-               ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
-               if (ret)
-                       goto out;
-       }
-       if (feature_2_disabled) {
-               ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
-               if (ret)
-                       goto out;
-       }
-
-out:
        mutex_unlock(&smu->mutex);
 
        return ret;
 }
 
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
 {
        int ret = 0;
+       struct smu_context *smu = &adev->smu;
 
-       if (!if_version && !smu_version)
-               return -EINVAL;
-
-       if (smu->smc_fw_if_version && smu->smc_fw_version)
-       {
-               if (if_version)
-                       *if_version = smu->smc_fw_if_version;
-
-               if (smu_version)
-                       *smu_version = smu->smc_fw_version;
-
-               return 0;
-       }
-
-       if (if_version) {
-               ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
-               if (ret)
-                       return ret;
-
-               smu->smc_fw_if_version = *if_version;
-       }
-
-       if (smu_version) {
-               ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
-               if (ret)
-                       return ret;
-
-               smu->smc_fw_version = *smu_version;
-       }
+       if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
+               *value = smu_get_gfx_off_status(smu);
+       else
+               ret = -EINVAL;
 
        return ret;
 }
@@ -246,9 +97,6 @@ int smu_set_soft_freq_range(struct smu_context *smu,
 {
        int ret = 0;
 
-       if (!smu_clk_dpm_is_enabled(smu, clk_type))
-               return 0;
-
        mutex_lock(&smu->mutex);
 
        if (smu->ppt_funcs->set_soft_freq_limited_range)
@@ -285,33 +133,6 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
        return ret;
 }
 
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
-{
-       enum smu_feature_mask feature_id = 0;
-
-       switch (clk_type) {
-       case SMU_MCLK:
-       case SMU_UCLK:
-               feature_id = SMU_FEATURE_DPM_UCLK_BIT;
-               break;
-       case SMU_GFXCLK:
-       case SMU_SCLK:
-               feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
-               break;
-       case SMU_SOCCLK:
-               feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
-               break;
-       default:
-               return true;
-       }
-
-       if(!smu_feature_is_enabled(smu, feature_id)) {
-               return false;
-       }
-
-       return true;
-}
-
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
@@ -386,45 +207,6 @@ int smu_get_power_num_states(struct smu_context *smu,
        return 0;
 }
 
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
-                    void *table_data, bool drv2smu)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-       struct amdgpu_device *adev = smu->adev;
-       struct smu_table *table = &smu_table->driver_table;
-       int table_id = smu_table_get_index(smu, table_index);
-       uint32_t table_size;
-       int ret = 0;
-       if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
-               return -EINVAL;
-
-       table_size = smu_table->tables[table_index].size;
-
-       if (drv2smu) {
-               memcpy(table->cpu_addr, table_data, table_size);
-               /*
-                * Flush hdp cache: to guard the content seen by
-                * GPU is consitent with CPU.
-                */
-               amdgpu_asic_flush_hdp(adev, NULL);
-       }
-
-       ret = smu_send_smc_msg_with_param(smu, drv2smu ?
-                                         SMU_MSG_TransferTableDram2Smu :
-                                         SMU_MSG_TransferTableSmu2Dram,
-                                         table_id | ((argument & 0xFFFF) << 16),
-                                         NULL);
-       if (ret)
-               return ret;
-
-       if (!drv2smu) {
-               amdgpu_asic_flush_hdp(adev, NULL);
-               memcpy(table_data, table->cpu_addr, table_size);
-       }
-
-       return ret;
-}
-
 bool is_support_sw_smu(struct amdgpu_device *adev)
 {
        if (adev->asic_type >= CHIP_ARCTURUS)
@@ -525,63 +307,6 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
        return ret;
 }
 
-int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
-{
-       struct smu_feature *feature = &smu->smu_feature;
-       int feature_id;
-       int ret = 0;
-
-       if (smu->is_apu)
-               return 1;
-       feature_id = smu_feature_get_index(smu, mask);
-       if (feature_id < 0)
-               return 0;
-
-       WARN_ON(feature_id > feature->feature_num);
-
-       mutex_lock(&feature->mutex);
-       ret = test_bit(feature_id, feature->enabled);
-       mutex_unlock(&feature->mutex);
-
-       return ret;
-}
-
-int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
-                           bool enable)
-{
-       struct smu_feature *feature = &smu->smu_feature;
-       int feature_id;
-
-       feature_id = smu_feature_get_index(smu, mask);
-       if (feature_id < 0)
-               return -EINVAL;
-
-       WARN_ON(feature_id > feature->feature_num);
-
-       return smu_feature_update_enable_state(smu,
-                                              1ULL << feature_id,
-                                              enable);
-}
-
-int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
-{
-       struct smu_feature *feature = &smu->smu_feature;
-       int feature_id;
-       int ret = 0;
-
-       feature_id = smu_feature_get_index(smu, mask);
-       if (feature_id < 0)
-               return 0;
-
-       WARN_ON(feature_id > feature->feature_num);
-
-       mutex_lock(&feature->mutex);
-       ret = test_bit(feature_id, feature->supported);
-       mutex_unlock(&feature->mutex);
-
-       return ret;
-}
-
 static int smu_set_funcs(struct amdgpu_device *adev)
 {
        struct smu_context *smu = &adev->smu;
@@ -676,22 +401,6 @@ static int smu_late_init(void *handle)
        return 0;
 }
 
-int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
-                           uint16_t *size, uint8_t *frev, uint8_t *crev,
-                           uint8_t **addr)
-{
-       struct amdgpu_device *adev = smu->adev;
-       uint16_t data_start;
-
-       if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
-                                          size, frev, crev, &data_start))
-               return -EINVAL;
-
-       *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
-
-       return 0;
-}
-
 static int smu_init_fb_allocations(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -1135,7 +844,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
-       ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+       ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
        if (ret)
                return ret;
 
@@ -1280,7 +989,6 @@ static int smu_hw_init(void *handle)
 static int smu_disable_dpms(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint64_t features_to_disable;
        int ret = 0;
        bool use_baco = !smu->is_apu &&
                ((adev->in_gpu_reset &&
@@ -1316,11 +1024,8 @@ static int smu_disable_dpms(struct smu_context *smu)
         * BACO feature has to be kept enabled.
         */
        if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
-               features_to_disable = U64_MAX &
-                       ~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
-               ret = smu_feature_update_enable_state(smu,
-                                                     features_to_disable,
-                                                     0);
+               ret = smu_disable_all_features_with_exception(smu,
+                                                             SMU_FEATURE_BACO_BIT);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
        } else {
@@ -1341,7 +1046,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+       smu_i2c_fini(smu, &adev->pm.smu_i2c);
 
        cancel_work_sync(&smu->throttling_logging_work);
 
@@ -1884,12 +1589,6 @@ int smu_set_mp1_state(struct smu_context *smu,
                return 0;
        }
 
-       /* some asics may not support those messages */
-       if (smu_msg_get_index(smu, msg) < 0) {
-               mutex_unlock(&smu->mutex);
-               return 0;
-       }
-
        ret = smu_send_smc_msg(smu, msg, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
@@ -1944,35 +1643,34 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-       void *watermarks_table = smu->smu_table.watermarks_table;
+       int ret = 0;
 
-       if (!watermarks_table)
-               return -EINVAL;
+       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&smu->mutex);
 
-       return smu_update_table(smu,
-                               SMU_TABLE_WATERMARKS,
-                               0,
-                               watermarks_table,
-                               true);
+       ret = smu_set_watermarks_table(smu, NULL);
+
+       mutex_unlock(&smu->mutex);
+
+       return ret;
 }
 
 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-       void *table = smu->smu_table.watermarks_table;
+       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       if (!table)
-               return -EINVAL;
-
        mutex_lock(&smu->mutex);
 
        if (!smu->disable_watermark &&
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-               smu_set_watermarks_table(smu, table, clock_ranges);
+               ret = smu_set_watermarks_table(smu, clock_ranges);
 
                if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
                        smu->watermarks_bitmap |= WATERMARKS_EXIST;
@@ -1982,7 +1680,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
        mutex_unlock(&smu->mutex);
 
-       return 0;
+       return ret;
 }
 
 int smu_set_ac_dc(struct smu_context *smu)
@@ -2216,8 +1914,14 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->od_edit_dpm_table)
+       if (smu->ppt_funcs->od_edit_dpm_table) {
                ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+               if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
+                       ret = smu_handle_task(smu,
+                                             smu->smu_dpm.dpm_level,
+                                             AMD_PP_TASK_READJUST_POWER_STATE,
+                                             false);
+       }
 
        mutex_unlock(&smu->mutex);
 
index 56dc20a..3b9182c 100644 (file)
  *
  */
 
+#define SWSMU_CODE_LAYER_L2
+
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_arcturus.h"
 #include "soc15_common.h"
@@ -43,6 +45,7 @@
 #include <linux/i2c.h>
 #include <linux/pci.h>
 #include "amdgpu_ras.h"
+#include "smu_cmn.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -56,8 +59,6 @@
 
 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
 
-#define MSG_MAP(msg, index, valid_in_vf) \
-       [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
 #define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
        [smu_feature] = {1, (arcturus_feature)}
 
@@ -78,7 +79,7 @@
 /* possible frequency drift (1Mhz) */
 #define EPSILON                                1
 
-static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
+static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                         PPSMC_MSG_TestMessage,                     0),
        MSG_MAP(GetSmuVersion,                       PPSMC_MSG_GetSmuVersion,                   1),
        MSG_MAP(GetDriverIfVersion,                  PPSMC_MSG_GetDriverIfVersion,              1),
@@ -141,7 +142,7 @@ static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(ReadSerialNumBottom32,               PPSMC_MSG_ReadSerialNumBottom32,           1),
 };
 
-static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
+static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(GFXCLK, PPCLK_GFXCLK),
        CLK_MAP(SCLK,   PPCLK_GFXCLK),
        CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -152,7 +153,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(VCLK, PPCLK_VCLK),
 };
 
-static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(DPM_PREFETCHER),
        FEA_MAP(DPM_GFXCLK),
        FEA_MAP(DPM_UCLK),
@@ -181,7 +182,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_CO
        FEA_MAP(TEMP_DEPENDENT_VMIN),
 };
 
-static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PPTABLE),
        TAB_MAP(AVFS),
        TAB_MAP(AVFS_PSM_DEBUG),
@@ -194,12 +195,12 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(ACTIVITY_MONITOR_COEFF),
 };
 
-static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
        PWR_MAP(AC),
        PWR_MAP(DC),
 };
 
-static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,       WORKLOAD_PPLIB_DEFAULT_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
@@ -207,103 +208,10 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFI
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
-static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_msg_mapping mapping;
-
-       if (index >= SMU_MSG_MAX_COUNT)
-               return -EINVAL;
-
-       mapping = arcturus_message_map[index];
-       if (!(mapping.valid_mapping))
-               return -EINVAL;
-
-       if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
-               return -EACCES;
-
-       return mapping.map_to;
-}
-
-static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_CLK_COUNT)
-               return -EINVAL;
-
-       mapping = arcturus_clk_map[index];
-       if (!(mapping.valid_mapping)) {
-               dev_warn(smc->adev->dev, "Unsupported SMU clk: %d\n", index);
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_FEATURE_COUNT)
-               return -EINVAL;
-
-       mapping = arcturus_feature_mask_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_TABLE_COUNT)
-               return -EINVAL;
-
-       mapping = arcturus_table_map[index];
-       if (!(mapping.valid_mapping)) {
-               dev_warn(smc->adev->dev, "Unsupported SMU table: %d\n", index);
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_POWER_SOURCE_COUNT)
-               return -EINVAL;
-
-       mapping = arcturus_pwr_src_map[index];
-       if (!(mapping.valid_mapping)) {
-               dev_warn(smc->adev->dev, "Unsupported SMU power source: %d\n", index);
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
-               return -EINVAL;
-
-       mapping = arcturus_workload_map[profile];
-       if (!(mapping.valid_mapping))
-               return -EINVAL;
-
-       return mapping.map_to;
-}
-
-static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int arcturus_tables_init(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
 
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -352,6 +260,21 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
        return 0;
 }
 
+static int arcturus_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = arcturus_tables_init(smu);
+       if (ret)
+               return ret;
+
+       ret = arcturus_allocate_dpm_context(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
 static int
 arcturus_get_allowed_feature_mask(struct smu_context *smu,
                                  uint32_t *feature_mask, uint32_t num)
@@ -374,7 +297,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
 
        /* socclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.soc_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_SOCCLK,
                                                     dpm_table);
@@ -392,7 +315,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
 
        /* gfxclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.gfx_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_GFXCLK,
                                                     dpm_table);
@@ -410,7 +333,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
 
        /* memclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.uclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_UCLK,
                                                     dpm_table);
@@ -428,7 +351,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
 
        /* fclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.fclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_FCLK,
                                                     dpm_table);
@@ -488,7 +411,7 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                           smc_dpm_info);
 
-       ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+       ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
                                      (uint8_t **)&smc_dpm_table);
        if (ret)
                return ret;
@@ -533,13 +456,13 @@ static int arcturus_run_btc(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
        if (ret) {
                dev_err(smu->adev->dev, "RunAfllBtc failed!\n");
                return ret;
        }
 
-       return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+       return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
 }
 
 static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -621,7 +544,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
 
        if (!smu_table->metrics_time ||
             time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_SMU_METRICS,
                                       0,
                                       smu_table->metrics_table,
@@ -730,7 +653,9 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
        if (!value)
                return -EINVAL;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return -EINVAL;
 
@@ -742,31 +667,31 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
                 * We can use Average_* for dpm disabled case.
                 *   But this is available for gfxclk/uclk/socclk/vclk/dclk.
                 */
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
                        member_type = METRICS_CURR_GFXCLK;
                else
                        member_type = METRICS_AVERAGE_GFXCLK;
                break;
        case PPCLK_UCLK:
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
                        member_type = METRICS_CURR_UCLK;
                else
                        member_type = METRICS_AVERAGE_UCLK;
                break;
        case PPCLK_SOCCLK:
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
                        member_type = METRICS_CURR_SOCCLK;
                else
                        member_type = METRICS_AVERAGE_SOCCLK;
                break;
        case PPCLK_VCLK:
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
                        member_type = METRICS_CURR_VCLK;
                else
                        member_type = METRICS_AVERAGE_VCLK;
                break;
        case PPCLK_DCLK:
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
                        member_type = METRICS_CURR_DCLK;
                else
                        member_type = METRICS_AVERAGE_DCLK;
@@ -912,10 +837,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
        uint32_t freq;
        int ret = 0;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
            (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
                freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                        (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
                        (PPCLK_GFXCLK << 16) | (freq & 0xffff),
                        NULL);
@@ -926,10 +851,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            (feature_mask & FEATURE_DPM_UCLK_MASK)) {
                freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                        (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
                        (PPCLK_UCLK << 16) | (freq & 0xffff),
                        NULL);
@@ -940,10 +865,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
            (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
                freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                        (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
                        (PPCLK_SOCCLK << 16) | (freq & 0xffff),
                        NULL);
@@ -966,7 +891,7 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
        uint32_t smu_version;
        int ret = 0;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu version!\n");
                return ret;
@@ -1283,7 +1208,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
        if (!buf)
                return -EINVAL;
 
-       result = smu_get_smc_version(smu, NULL, &smu_version);
+       result = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (result)
                return result;
 
@@ -1300,12 +1225,14 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
                 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
                 * Not all profile modes are supported on arcturus.
                 */
-               workload_type = smu_workload_get_type(smu, i);
+               workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              i);
                if (workload_type < 0)
                        continue;
 
                if (smu_version >= 0x360d00) {
-                       result = smu_update_table(smu,
+                       result = smu_cmn_update_table(smu,
                                                  SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                                                  workload_type,
                                                  (void *)(&activity_monitor),
@@ -1368,13 +1295,13 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
                return -EINVAL;
        }
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret)
                return ret;
 
        if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
             (smu_version >=0x360d00)) {
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                                       WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor),
@@ -1409,7 +1336,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
                        break;
                }
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                                       WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor),
@@ -1424,13 +1351,15 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
         * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
         * Not all profile modes are supported on arcturus.
         */
-       workload_type = smu_workload_get_type(smu, profile_mode);
+       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                      CMN2ASIC_MAPPING_WORKLOAD,
+                                                      profile_mode);
        if (workload_type < 0) {
                dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
                return -EINVAL;
        }
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetWorkloadMask,
                                          1 << workload_type,
                                          NULL);
@@ -1450,7 +1379,7 @@ static int arcturus_set_performance_level(struct smu_context *smu,
        uint32_t smu_version;
        int ret;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu version!\n");
                return ret;
@@ -1912,7 +1841,7 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
        unsigned long feature_enabled;
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
        feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
                           ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1925,8 +1854,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
        int ret = 0;
 
        if (enable) {
-               if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+               if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
                        if (ret) {
                                dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
                                return ret;
@@ -1934,8 +1863,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                }
                power_gate->vcn_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
                        if (ret) {
                                dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
                                return ret;
@@ -1947,14 +1876,12 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
        return ret;
 }
 
-static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t  *req, bool write,
+static void arcturus_fill_i2c_req(SwI2cRequest_t  *req, bool write,
                                  uint8_t address, uint32_t numbytes,
                                  uint8_t *data)
 {
        int i;
 
-       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
        req->I2CcontrollerPort = 0;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
@@ -1981,7 +1908,7 @@ static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t  *req, bool write,
        }
 }
 
-static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+static int arcturus_i2c_read_data(struct i2c_adapter *control,
                                               uint8_t address,
                                               uint8_t *data,
                                               uint32_t numbytes)
@@ -1992,12 +1919,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
        struct smu_table_context *smu_table = &adev->smu.smu_table;
        struct smu_table *table = &smu_table->driver_table;
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
-       arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+       arcturus_fill_i2c_req(&req, false, address, numbytes, data);
 
        mutex_lock(&adev->smu.mutex);
        /* Now read data starting with that address */
-       ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
                                        true);
        mutex_unlock(&adev->smu.mutex);
 
@@ -2008,18 +1941,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
                for (i = 0; i < numbytes; i++)
                        data[i] = res->SwI2cCmds[i].Data;
 
-               dev_dbg(adev->dev, "arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+               dev_dbg(adev->dev, "arcturus_i2c_read_data, address = %x, bytes = %d, data :",
                                  (uint16_t)address, numbytes);
 
                print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
                               8, 1, data, numbytes, false);
        } else
-               dev_err(adev->dev, "arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
+               dev_err(adev->dev, "arcturus_i2c_read_data - error occurred :%x", ret);
 
        return ret;
 }
 
-static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+static int arcturus_i2c_write_data(struct i2c_adapter *control,
                                                uint8_t address,
                                                uint8_t *data,
                                                uint32_t numbytes)
@@ -2028,11 +1961,17 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
        SwI2cRequest_t req;
        struct amdgpu_device *adev = to_amdgpu_device(control);
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
-       arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+       arcturus_fill_i2c_req(&req, true, address, numbytes, data);
 
        mutex_lock(&adev->smu.mutex);
-       ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
        mutex_unlock(&adev->smu.mutex);
 
        if (!ret) {
@@ -2055,7 +1994,7 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
        return ret;
 }
 
-static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
                              struct i2c_msg *msgs, int num)
 {
        uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
@@ -2078,18 +2017,18 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
                        data_chunk[1] = (next_eeprom_addr & 0xff);
 
                        if (msgs[i].flags & I2C_M_RD) {
-                               ret = arcturus_i2c_eeprom_read_data(i2c_adap,
-                                                               (uint8_t)msgs[i].addr,
-                                                               data_chunk, MAX_SW_I2C_COMMANDS);
+                               ret = arcturus_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
 
                                memcpy(data_ptr, data_chunk + 2, data_chunk_size);
                        } else {
 
                                memcpy(data_chunk + 2, data_ptr, data_chunk_size);
 
-                               ret = arcturus_i2c_eeprom_write_data(i2c_adap,
-                                                                (uint8_t)msgs[i].addr,
-                                                                data_chunk, MAX_SW_I2C_COMMANDS);
+                               ret = arcturus_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
                        }
 
                        if (ret) {
@@ -2106,17 +2045,17 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
                        data_chunk[1] = (next_eeprom_addr & 0xff);
 
                        if (msgs[i].flags & I2C_M_RD) {
-                               ret = arcturus_i2c_eeprom_read_data(i2c_adap,
-                                                               (uint8_t)msgs[i].addr,
-                                                               data_chunk, (data_size % data_chunk_size) + 2);
+                               ret = arcturus_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
 
                                memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
                        } else {
                                memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
 
-                               ret = arcturus_i2c_eeprom_write_data(i2c_adap,
-                                                                (uint8_t)msgs[i].addr,
-                                                                data_chunk, (data_size % data_chunk_size) + 2);
+                               ret = arcturus_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
                        }
 
                        if (ret) {
@@ -2130,15 +2069,15 @@ fail:
        return num;
 }
 
-static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+static u32 arcturus_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 
-static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
-       .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
-       .functionality = arcturus_i2c_eeprom_i2c_func,
+static const struct i2c_algorithm arcturus_i2c_algo = {
+       .master_xfer = arcturus_i2c_xfer,
+       .functionality = arcturus_i2c_func,
 };
 
 static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
@@ -2148,7 +2087,7 @@ static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
        return control->dev.parent == &adev->pdev->dev;
 }
 
-static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
        int res;
@@ -2160,8 +2099,8 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
-       control->algo = &arcturus_i2c_eeprom_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
+       control->algo = &arcturus_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
 
        res = i2c_add_adapter(control);
        if (res)
@@ -2170,7 +2109,7 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
        return res;
 }
 
-static void arcturus_i2c_eeprom_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
 {
        if (!arcturus_i2c_adapter_is_added(control))
                return;
@@ -2184,7 +2123,7 @@ static void arcturus_get_unique_id(struct smu_context *smu)
        uint32_t top32 = 0, bottom32 = 0, smu_version;
        uint64_t id;
 
-       if (smu_get_smc_version(smu, NULL, &smu_version)) {
+       if (smu_cmn_get_smc_version(smu, NULL, &smu_version)) {
                dev_warn(adev->dev, "Failed to get smu version, cannot get unique_id or serial_number\n");
                return;
        }
@@ -2196,8 +2135,8 @@ static void arcturus_get_unique_id(struct smu_context *smu)
        }
 
        /* Get the SN to turn into a Unique ID */
-       smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
-       smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
+       smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
+       smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
 
        id = ((uint64_t)bottom32 << 32) | top32;
        adev->unique_id = id;
@@ -2225,7 +2164,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
        uint32_t smu_version;
        int ret;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu version!\n");
                return ret;
@@ -2237,7 +2176,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
                return -EINVAL;
        }
 
-       return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
 static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
@@ -2245,7 +2184,7 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
        uint32_t smu_version;
        int ret;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get smu version!\n");
                return ret;
@@ -2258,12 +2197,12 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
        }
 
        if (en)
-               return smu_send_smc_msg_with_param(smu,
+               return smu_cmn_send_smc_msg_with_param(smu,
                                                   SMU_MSG_GmiPwrDnControl,
                                                   1,
                                                   NULL);
 
-       return smu_send_smc_msg_with_param(smu,
+       return smu_cmn_send_smc_msg_with_param(smu,
                                           SMU_MSG_GmiPwrDnControl,
                                           0,
                                           NULL);
@@ -2315,16 +2254,6 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
 }
 
 static const struct pptable_funcs arcturus_ppt_funcs = {
-       /* translate smu index into arcturus specific index */
-       .get_smu_msg_index = arcturus_get_smu_msg_index,
-       .get_smu_clk_index = arcturus_get_smu_clk_index,
-       .get_smu_feature_index = arcturus_get_smu_feature_index,
-       .get_smu_table_index = arcturus_get_smu_table_index,
-       .get_smu_power_index= arcturus_get_pwr_src_index,
-       .get_workload_type = arcturus_get_workload_type,
-       /* internal structurs allocations */
-       .tables_init = arcturus_tables_init,
-       .alloc_dpm_context = arcturus_allocate_dpm_context,
        /* init dpm */
        .get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
        /* btc */
@@ -2346,13 +2275,13 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .get_power_limit = arcturus_get_power_limit,
        .is_dpm_running = arcturus_is_dpm_running,
        .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
-       .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
-       .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+       .i2c_init = arcturus_i2c_control_init,
+       .i2c_fini = arcturus_i2c_control_fini,
        .get_unique_id = arcturus_get_unique_id,
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
        .fini_microcode = smu_v11_0_fini_microcode,
-       .init_smc_tables = smu_v11_0_init_smc_tables,
+       .init_smc_tables = arcturus_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
@@ -2361,15 +2290,18 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .setup_pptable = arcturus_setup_pptable,
        .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
        .check_fw_version = smu_v11_0_check_fw_version,
-       .write_pptable = smu_v11_0_write_pptable,
+       .write_pptable = smu_cmn_write_pptable,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .system_features_control = smu_v11_0_system_features_control,
-       .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+       .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+       .send_smc_msg = smu_cmn_send_smc_msg,
        .init_display_count = NULL,
        .set_allowed_mask = smu_v11_0_set_allowed_mask,
-       .get_enabled_mask = smu_v11_0_get_enabled_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
+       .feature_is_enabled = smu_cmn_feature_is_enabled,
+       .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .notify_display_change = NULL,
        .set_power_limit = smu_v11_0_set_power_limit,
        .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2396,9 +2328,17 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .set_df_cstate = arcturus_set_df_cstate,
        .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
        .log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
+       .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+       .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &arcturus_ppt_funcs;
+       smu->message_map = arcturus_message_map;
+       smu->clock_map = arcturus_clk_map;
+       smu->feature_map = arcturus_feature_mask_map;
+       smu->table_map = arcturus_table_map;
+       smu->pwr_src_map = arcturus_pwr_src_map;
+       smu->workload_map = arcturus_workload_map;
 }
index 70181ba..28312d6 100644 (file)
@@ -259,7 +259,7 @@ struct smu_table_context
        void                            *max_sustainable_clocks;
        struct smu_bios_boot_up_values  boot_values;
        void                            *driver_pptable;
-       struct smu_table                *tables;
+       struct smu_table                tables[SMU_TABLE_COUNT];
        /*
         * The driver table is just a staging buffer for
         * uploading/downloading content from the SMU.
@@ -366,6 +366,17 @@ struct smu_umd_pstate_table {
        struct pstates_clk_freq         dclk_pstate;
 };
 
+struct cmn2asic_msg_mapping {
+       int     valid_mapping;
+       int     map_to;
+       int     valid_in_vf;
+};
+
+struct cmn2asic_mapping {
+       int     valid_mapping;
+       int     map_to;
+};
+
 #define WORKLOAD_POLICY_MAX 7
 struct smu_context
 {
@@ -373,6 +384,12 @@ struct smu_context
        struct amdgpu_irq_src           irq_source;
 
        const struct pptable_funcs      *ppt_funcs;
+       const struct cmn2asic_msg_mapping       *message_map;
+       const struct cmn2asic_mapping   *clock_map;
+       const struct cmn2asic_mapping   *feature_map;
+       const struct cmn2asic_mapping   *table_map;
+       const struct cmn2asic_mapping   *pwr_src_map;
+       const struct cmn2asic_mapping   *workload_map;
        struct mutex                    mutex;
        struct mutex                    sensor_lock;
        struct mutex                    metrics_lock;
@@ -434,13 +451,6 @@ struct smu_context
 struct i2c_adapter;
 
 struct pptable_funcs {
-       int (*alloc_dpm_context)(struct smu_context *smu);
-       int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
-       int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index);
-       int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index);
-       int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
-       int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
-       int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
        int (*run_btc)(struct smu_context *smu);
        int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
        enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
@@ -479,10 +489,9 @@ struct pptable_funcs {
        int (*notify_smc_display_config)(struct smu_context *smu);
        int (*set_cpu_power_state)(struct smu_context *smu);
        bool (*is_dpm_running)(struct smu_context *smu);
-       int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
        int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
        int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
-       int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
+       int (*set_watermarks_table)(struct smu_context *smu,
                                    struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
        int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
        int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
@@ -494,8 +503,8 @@ struct pptable_funcs {
        int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
        int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
        int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
-       int (*i2c_eeprom_init)(struct smu_context *smu, struct i2c_adapter *control);
-       void (*i2c_eeprom_fini)(struct smu_context *smu, struct i2c_adapter *control);
+       int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+       void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
        void (*get_unique_id)(struct smu_context *smu);
        int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
        int (*init_microcode)(struct smu_context *smu);
@@ -519,9 +528,14 @@ struct pptable_funcs {
        int (*system_features_control)(struct smu_context *smu, bool en);
        int (*send_smc_msg_with_param)(struct smu_context *smu,
                                       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+       int (*send_smc_msg)(struct smu_context *smu,
+                           enum smu_message_type msg,
+                           uint32_t *read_arg);
        int (*init_display_count)(struct smu_context *smu, uint32_t count);
        int (*set_allowed_mask)(struct smu_context *smu);
        int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+       int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+       int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
        int (*notify_display_change)(struct smu_context *smu);
        int (*set_power_limit)(struct smu_context *smu, uint32_t n);
        int (*init_max_sustainable_clocks)(struct smu_context *smu);
@@ -555,6 +569,7 @@ struct pptable_funcs {
        int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
        int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
        int (*gfx_off_control)(struct smu_context *smu, bool enable);
+       uint32_t (*get_gfx_off_status)(struct smu_context *smu);
        int (*register_irq_handler)(struct smu_context *smu);
        int (*set_azalia_d3_pme)(struct smu_context *smu);
        int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
@@ -571,6 +586,8 @@ struct pptable_funcs {
        int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
        int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
        void (*log_thermal_throttling_event)(struct smu_context *smu);
+       size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+       int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
 };
 
 typedef enum {
@@ -603,6 +620,40 @@ typedef enum {
        METRICS_CURR_FANSPEED,
 } MetricsMember_t;
 
+enum smu_cmn2asic_mapping_type {
+       CMN2ASIC_MAPPING_MSG,
+       CMN2ASIC_MAPPING_CLK,
+       CMN2ASIC_MAPPING_FEATURE,
+       CMN2ASIC_MAPPING_TABLE,
+       CMN2ASIC_MAPPING_PWR,
+       CMN2ASIC_MAPPING_WORKLOAD,
+};
+
+#define MSG_MAP(msg, index, valid_in_vf) \
+       [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
+
+#define CLK_MAP(clk, index) \
+       [SMU_##clk] = {1, (index)}
+
+#define FEA_MAP(fea) \
+       [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+
+#define TAB_MAP(tab) \
+       [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_VALID(tab) \
+       [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_INVALID(tab) \
+       [SMU_TABLE_##tab] = {0, TABLE_##tab}
+
+#define PWR_MAP(tab) \
+       [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
+
+#define WORKLOAD_MAP(profile, workload) \
+       [profile] = {1, (workload)}
+
+#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
 int smu_load_microcode(struct smu_context *smu);
 
 int smu_check_fw_status(struct smu_context *smu);
@@ -678,25 +729,11 @@ bool smu_mode1_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
 int smu_mode2_reset(struct smu_context *smu);
 
-extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
-                                  uint16_t *size, uint8_t *frev, uint8_t *crev,
-                                  uint8_t **addr);
-
 extern const struct amd_ip_funcs smu_ip_funcs;
 
 extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
 extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
 
-extern int smu_feature_is_enabled(struct smu_context *smu,
-                                 enum smu_feature_mask mask);
-extern int smu_feature_set_enabled(struct smu_context *smu,
-                                  enum smu_feature_mask mask, bool enable);
-extern int smu_feature_is_supported(struct smu_context *smu,
-                                   enum smu_feature_mask mask);
-
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
-                    void *table_data, bool drv2smu);
-
 bool is_support_sw_smu(struct amdgpu_device *adev);
 int smu_reset(struct smu_context *smu);
 int smu_sys_get_pp_table(struct smu_context *smu, void **table);
@@ -722,7 +759,6 @@ extern int smu_handle_task(struct smu_context *smu,
 int smu_switch_power_profile(struct smu_context *smu,
                             enum PP_SMC_POWER_PROFILE type,
                             bool en);
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max);
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -731,9 +767,6 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
 int smu_set_ac_dc(struct smu_context *smu);
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
 int smu_force_clk_levels(struct smu_context *smu,
@@ -755,4 +788,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 int smu_get_dpm_clock_table(struct smu_context *smu,
                            struct dpm_clocks *clock_table);
 
+int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+
+#endif
 #endif
index f061585..429f5aa 100644 (file)
@@ -31,7 +31,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2B
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
 #define MAX_DPM_LEVELS 16
 #define MAX_PCIE_CONF 2
 
-#define CLK_MAP(clk, index) \
-       [SMU_##clk] = {1, (index)}
-
-#define FEA_MAP(fea) \
-       [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
-
-#define TAB_MAP(tab) \
-       [SMU_TABLE_##tab] = {1, TABLE_##tab}
-
-#define PWR_MAP(tab) \
-       [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
-
-#define WORKLOAD_MAP(profile, workload) \
-       [profile] = {1, (workload)}
-
 #define CTF_OFFSET_EDGE                        5
 #define CTF_OFFSET_HOTSPOT             5
 #define CTF_OFFSET_MEM                 5
@@ -77,17 +62,6 @@ static const struct smu_temperature_range smu11_thermal_policy[] =
        { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
 };
 
-struct smu_11_0_msg_mapping {
-       int     valid_mapping;
-       int     map_to;
-       int     valid_in_vf;
-};
-
-struct smu_11_0_cmn2aisc_mapping {
-       int     valid_mapping;
-       int     map_to;
-};
-
 struct smu_11_0_max_sustainable_clocks {
        uint32_t display_clock;
        uint32_t phy_clock;
@@ -160,6 +134,8 @@ enum smu_v11_0_baco_seq {
        BACO_SEQ_COUNT,
 };
 
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
 int smu_v11_0_init_microcode(struct smu_context *smu);
 
 void smu_v11_0_fini_microcode(struct smu_context *smu);
@@ -182,8 +158,6 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
 
 int smu_v11_0_check_fw_version(struct smu_context *smu);
 
-int smu_v11_0_write_pptable(struct smu_context *smu);
-
 int smu_v11_0_set_driver_table_location(struct smu_context *smu);
 
 int smu_v11_0_set_tool_table_location(struct smu_context *smu);
@@ -193,19 +167,10 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
 int smu_v11_0_system_features_control(struct smu_context *smu,
                                             bool en);
 
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
-                             enum smu_message_type msg,
-                             uint32_t param,
-                             uint32_t *read_arg);
-
 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
 
 int smu_v11_0_set_allowed_mask(struct smu_context *smu);
 
-int smu_v11_0_get_enabled_mask(struct smu_context *smu,
-                                     uint32_t *feature_mask, uint32_t num);
-
 int smu_v11_0_notify_display_change(struct smu_context *smu);
 
 int smu_v11_0_get_current_power_limit(struct smu_context *smu,
@@ -300,3 +265,4 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
                                  uint32_t *max_value);
 
 #endif
+#endif
index fd83a72..02de3b6 100644 (file)
 #define MP1_Public                     0x03b00000
 #define MP1_SRAM                       0x03c00004
 
-
-struct smu_12_0_cmn2aisc_mapping {
-       int     valid_mapping;
-       int     map_to;
-};
-
-int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
-                                             uint16_t msg);
-
-int smu_v12_0_wait_for_response(struct smu_context *smu);
-
-int
-smu_v12_0_send_msg_with_param(struct smu_context *smu,
-                             enum smu_message_type msg,
-                             uint32_t param,
-                             uint32_t *read_arg);
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
 
 int smu_v12_0_check_fw_status(struct smu_context *smu);
 
@@ -64,15 +49,10 @@ uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
 
 int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
 
-int smu_v12_0_init_smc_tables(struct smu_context *smu);
-
 int smu_v12_0_fini_smc_tables(struct smu_context *smu);
 
 int smu_v12_0_set_default_dpm_tables(struct smu_context *smu);
 
-int smu_v12_0_get_enabled_mask(struct smu_context *smu,
-                                     uint32_t *feature_mask, uint32_t num);
-
 int smu_v12_0_mode2_reset(struct smu_context *smu);
 
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -81,3 +61,4 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 int smu_v12_0_set_driver_table_location(struct smu_context *smu);
 
 #endif
+#endif
index ead135f..6aaf483 100644 (file)
  *
  */
 
+#define SWSMU_CODE_LAYER_L2
+
 #include <linux/firmware.h>
 #include <linux/pci.h>
+#include <linux/i2c.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
 #include "soc15_common.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_navi10.h"
@@ -41,6 +44,7 @@
 #include "thm/thm_11_0_2_sh_mask.h"
 
 #include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -52,6 +56,8 @@
 #undef pr_info
 #undef pr_debug
 
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
        FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
        FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
 
-#define MSG_MAP(msg, index, valid_in_vf) \
-       [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
-
-static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  1),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                1),
        MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,           1),
@@ -137,7 +140,7 @@ static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(GetVoltageByDpmOverdrive,       PPSMC_MSG_GetVoltageByDpmOverdrive,     0),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(GFXCLK, PPCLK_GFXCLK),
        CLK_MAP(SCLK,   PPCLK_GFXCLK),
        CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -152,7 +155,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(PHYCLK, PPCLK_PHYCLK),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(DPM_PREFETCHER),
        FEA_MAP(DPM_GFXCLK),
        FEA_MAP(DPM_GFX_PACE),
@@ -198,7 +201,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
        FEA_MAP(APCC_DFLL),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PPTABLE),
        TAB_MAP(WATERMARKS),
        TAB_MAP(AVFS),
@@ -213,12 +216,12 @@ static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PACE),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
        PWR_MAP(AC),
        PWR_MAP(DC),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,       WORKLOAD_PPLIB_DEFAULT_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -228,100 +231,6 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
-static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_msg_mapping mapping;
-
-       if (index >= SMU_MSG_MAX_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_message_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
-               return -EACCES;
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_CLK_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_clk_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_FEATURE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_feature_mask_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_TABLE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_table_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_POWER_SOURCE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_pwr_src_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-
-static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
-               return -EINVAL;
-
-       mapping = navi10_workload_map[profile];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
 static bool is_asic_secure(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -467,7 +376,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                           smc_dpm_info);
 
-       ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+       ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
                                      (uint8_t **)&smc_dpm_table);
        if (ret)
                return ret;
@@ -487,7 +396,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
                        sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
                break;
        case 7: /* nv12 */
-               ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+               ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
                                              (uint8_t **)&smc_dpm_table_v4_7);
                if (ret)
                        return ret;
@@ -543,9 +452,10 @@ static int navi10_setup_pptable(struct smu_context *smu)
        return ret;
 }
 
-static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int navi10_tables_init(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
 
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -553,6 +463,8 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
@@ -584,7 +496,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
        mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time ||
             time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_SMU_METRICS,
                                       0,
                                       smu_table->metrics_table,
@@ -674,9 +586,6 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
-       if (smu_dpm->dpm_context)
-               return -EINVAL;
-
        smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                       GFP_KERNEL);
        if (!smu_dpm->dpm_context)
@@ -687,6 +596,21 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
        return 0;
 }
 
+static int navi10_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = navi10_tables_init(smu);
+       if (ret)
+               return ret;
+
+       ret = navi10_allocate_dpm_context(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
 static int navi10_set_default_dpm_table(struct smu_context *smu)
 {
        struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -696,7 +620,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* socclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.soc_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_SOCCLK,
                                                     dpm_table);
@@ -714,7 +638,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* gfxclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.gfx_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_GFXCLK,
                                                     dpm_table);
@@ -732,7 +656,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* uclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.uclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_UCLK,
                                                     dpm_table);
@@ -750,7 +674,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* vclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.vclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_VCLK,
                                                     dpm_table);
@@ -768,7 +692,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* dclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCLK,
                                                     dpm_table);
@@ -786,7 +710,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* dcefclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dcef_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCEFCLK,
                                                     dpm_table);
@@ -804,7 +728,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* pixelclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.pixel_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PIXCLK,
                                                     dpm_table);
@@ -822,7 +746,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* displayclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.display_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DISPCLK,
                                                     dpm_table);
@@ -840,7 +764,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* phyclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.phy_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PHYCLK,
                                                     dpm_table);
@@ -867,15 +791,15 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
                        if (ret)
                                return ret;
                }
                power_gate->vcn_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
@@ -892,15 +816,15 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
        int ret = 0;
 
        if (enable) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
                        if (ret)
                                return ret;
                }
                power_gate->jpeg_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
                        if (ret)
                                return ret;
                }
@@ -917,7 +841,9 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
        MetricsMember_t member_type;
        int clk_id = 0;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
@@ -955,7 +881,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
        DpmDescriptor_t *dpm_desc = NULL;
        uint32_t clk_index = 0;
 
-       clk_index = smu_clk_get_index(smu, clk_type);
+       clk_index = smu_cmn_to_asic_specific_index(smu,
+                                                  CMN2ASIC_MAPPING_CLK,
+                                                  clk_type);
        dpm_desc = &pptable->DpmDescriptor[clk_index];
 
        /* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -1336,11 +1264,11 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
        int ret = 0;
        uint32_t max_freq = 0;
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
        if (ret)
                return ret;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
                if (ret)
                        return ret;
@@ -1357,9 +1285,9 @@ static int navi10_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                                  smu->display_config->num_display,
                                                  NULL);
                if (ret)
@@ -1412,7 +1340,7 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
        unsigned long feature_enabled;
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
        feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
                           ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1483,11 +1411,13 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
                /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-               workload_type = smu_workload_get_type(smu, i);
+               workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              i);
                if (workload_type < 0)
                        return -EINVAL;
 
-               result = smu_update_table(smu,
+               result = smu_cmn_update_table(smu,
                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
                                          (void *)(&activity_monitor), false);
                if (result) {
@@ -1558,7 +1488,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 
        if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), false);
                if (ret) {
@@ -1602,7 +1532,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
                        break;
                }
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), true);
                if (ret) {
@@ -1612,10 +1542,12 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-       workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                      CMN2ASIC_MAPPING_WORKLOAD,
+                                                      smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
-       smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+       smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
                                    1 << workload_type, NULL);
 
        return ret;
@@ -1631,14 +1563,14 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
        min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
 
-       if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                clock_req.clock_type = amd_pp_dcef_clock;
                clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
 
                ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
                if (!ret) {
-                       if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
-                               ret = smu_send_smc_msg_with_param(smu,
+                       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+                               ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                  SMU_MSG_SetMinDeepSleepDcefclk,
                                                                  min_clocks.dcef_clock_in_sr/100,
                                                                  NULL);
@@ -1652,7 +1584,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
                if (ret) {
                        dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
@@ -1664,68 +1596,66 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 }
 
 static int navi10_set_watermarks_table(struct smu_context *smu,
-                                      void *watermarks, struct
-                                      dm_pp_wm_sets_with_clock_ranges_soc15
-                                      *clock_ranges)
+                                      struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-       int i;
+       Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
-       Watermarks_t *table = watermarks;
+       int i;
 
-       if (!table || !clock_ranges)
-               return -EINVAL;
+       if (clock_ranges) {
+               if (clock_ranges->num_wm_dmif_sets > 4 ||
+                   clock_ranges->num_wm_mcif_sets > 4)
+                       return -EINVAL;
 
-       if (clock_ranges->num_wm_dmif_sets > 4 ||
-           clock_ranges->num_wm_mcif_sets > 4)
-               return -EINVAL;
+               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+                       table->WatermarkRow[1][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               }
 
-       for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-               table->WatermarkRow[1][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
-       }
+               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+                       table->WatermarkRow[0][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               }
 
-       for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-               table->WatermarkRow[0][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               smu->watermarks_bitmap |= WATERMARKS_EXIST;
        }
 
-       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
        /* pass data to smu controller */
-       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
+       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_cmn_write_watermarks_table(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to update WMTABLE!");
                        return ret;
@@ -1960,7 +1890,7 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
                        ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
                                (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
                                        pptable->PcieLaneCount[i] : pcie_width_cap);
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg,
                                          NULL);
@@ -2012,7 +1942,7 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
        uint32_t value = 0;
        int ret;
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_GetVoltageByDpm,
                                          param,
                                          &value);
@@ -2046,7 +1976,7 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
                (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
        int ret = 0;
 
-       ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
+       ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
                return ret;
@@ -2180,18 +2110,11 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
                break;
        case PP_OD_COMMIT_DPM_TABLE:
                navi10_dump_od_table(smu, od_table);
-               ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+               ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
                        return ret;
                }
-               // no lock needed because smu_od_edit_dpm_table has it
-               ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
-                       AMD_PP_TASK_READJUST_POWER_STATE,
-                       false);
-               if (ret) {
-                       return ret;
-               }
                break;
        case PP_OD_EDIT_VDDC_CURVE:
                if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
@@ -2260,7 +2183,7 @@ static int navi10_run_btc(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
        if (ret)
                dev_err(smu->adev->dev, "RunBtc failed!\n");
 
@@ -2272,9 +2195,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
        int result = 0;
 
        if (!enable)
-               result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
+               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
        else
-               result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
 
        return result;
 }
@@ -2303,7 +2226,7 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
        if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
                return 0;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret)
                return ret;
 
@@ -2340,19 +2263,245 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
        return navi10_dummy_pstate_control(smu, true);
 }
 
+static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
+                                 uint8_t address, uint32_t numbytes,
+                                 uint8_t *data)
+{
+       int i;
+
+       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+       req->I2CcontrollerPort = 0;
+       req->I2CSpeed = 2;
+       req->SlaveAddress = address;
+       req->NumCmds = numbytes;
+
+       for (i = 0; i < numbytes; i++) {
+               SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];
+
+               /* The first 2 bytes are always a write, carrying the 2-byte EEPROM address */
+               if (i < 2)
+                       cmd->Cmd = 1;
+               else
+                       cmd->Cmd = write;
+
+
+               /* Add RESTART for a read once the address bytes have been sent */
+               cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+               /* Add STOP at the end */
+               cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+               /* Fill with data regardless of read or write to simplify the code */
+               cmd->RegisterAddr = data[i];
+       }
+}
+
+static int navi10_i2c_read_data(struct i2c_adapter *control,
+                                              uint8_t address,
+                                              uint8_t *data,
+                                              uint32_t numbytes)
+{
+       uint32_t  i, ret = 0;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct smu_table *table = &smu_table->driver_table;
+
+       memset(&req, 0, sizeof(req));
+       navi10_fill_i2c_req(&req, false, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       /* Now read data starting with that address */
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+                                  true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+               /* Assume the SMU fills res.SwI2cCmds[i].Data with the bytes read */
+               for (i = 0; i < numbytes; i++)
+                       data[i] = res->SwI2cCmds[i].Data;
+
+               dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data :",
+                                 (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+       } else
+               dev_err(adev->dev, "navi10_i2c_read_data - error occurred :%x", ret);
+
+       return ret;
+}
+
+static int navi10_i2c_write_data(struct i2c_adapter *control,
+                                               uint8_t address,
+                                               uint8_t *data,
+                                               uint32_t numbytes)
+{
+       uint32_t ret;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       memset(&req, 0, sizeof(req));
+       navi10_fill_i2c_req(&req, true, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d , data: ",
+                                        (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+               /*
+                * According to the EEPROM spec, up to 10 ms is required for the
+                * EEPROM to flush its internal RX buffer after a STOP is issued at
+                * the end of a write transaction. During this time the EEPROM will
+                * not respond to any further commands - so wait a bit longer.
+                */
+               msleep(10);
+
+       } else
+               dev_err(adev->dev, "navi10_i2c_write- error occurred :%x", ret);
+
+       return ret;
+}
+
+static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
+                             struct i2c_msg *msgs, int num)
+{
+       uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+       uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+       for (i = 0; i < num; i++) {
+               /*
+                * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data
+                * at once, so the data has to be split into chunks and each chunk
+                * sent separately.
+                */
+               data_size = msgs[i].len - 2;
+               data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+               next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+               data_ptr = msgs[i].buf + 2;
+
+               for (j = 0; j < data_size / data_chunk_size; j++) {
+                       /* Insert the EEPROM dest address, bits 0-15 */
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = navi10_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
+
+                               memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+                       } else {
+
+                               memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+                               ret = navi10_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+
+                       next_eeprom_addr += data_chunk_size;
+                       data_ptr += data_chunk_size;
+               }
+
+               if (data_size % data_chunk_size) {
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = navi10_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
+
+                               memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+                       } else {
+                               memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+                               ret = navi10_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+fail:
+       return num;
+}
+
+static u32 navi10_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm navi10_i2c_algo = {
+       .master_xfer = navi10_i2c_xfer,
+       .functionality = navi10_i2c_func,
+};
+
+static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       return control->dev.parent == &adev->pdev->dev;
+}
+
+static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
+
+       /* The i2c control init may be called twice under SR-IOV */
+       if (navi10_i2c_adapter_is_added(control))
+               return 0;
+
+       control->owner = THIS_MODULE;
+       control->class = I2C_CLASS_SPD;
+       control->dev.parent = &adev->pdev->dev;
+       control->algo = &navi10_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+       res = i2c_add_adapter(control);
+       if (res)
+               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+       return res;
+}
+
+static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+       if (!navi10_i2c_adapter_is_added(control))
+               return;
+
+       i2c_del_adapter(control);
+}
+
+
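The navi10_i2c_xfer() path above splits each EEPROM transfer because a single SMU request carries at most MAX_SW_I2C_COMMANDS bytes, two of which hold the 16-bit EEPROM offset. A standalone sketch of that chunking arithmetic, assuming a placeholder value for the constant (illustration only, not the driver's definition):

#include <stdint.h>
#include <stdio.h>

#define MAX_SW_I2C_COMMANDS 24	/* assumed value, for illustration only */

/* Walk a buffer the way navi10_i2c_xfer() does: 2 address bytes per request,
 * then up to MAX_SW_I2C_COMMANDS - 2 payload bytes, advancing the EEPROM
 * offset by the chunk size each time. */
static void show_chunks(uint16_t start_addr, uint32_t payload_len)
{
	const uint32_t chunk = MAX_SW_I2C_COMMANDS - 2;
	uint32_t done = 0;

	while (done < payload_len) {
		uint32_t n = payload_len - done;

		if (n > chunk)
			n = chunk;
		printf("request: addr=0x%04x, %u payload bytes (+2 addr bytes)\n",
		       (unsigned)(start_addr + done), n);
		done += n;
	}
}

int main(void)
{
	show_chunks(0x0100, 50);	/* 50 bytes -> chunks of 22, 22, 6 */
	return 0;
}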
 static const struct pptable_funcs navi10_ppt_funcs = {
-       .tables_init = navi10_tables_init,
-       .alloc_dpm_context = navi10_allocate_dpm_context,
-       .get_smu_msg_index = navi10_get_smu_msg_index,
-       .get_smu_clk_index = navi10_get_smu_clk_index,
-       .get_smu_feature_index = navi10_get_smu_feature_index,
-       .get_smu_table_index = navi10_get_smu_table_index,
-       .get_smu_power_index = navi10_get_pwr_src_index,
-       .get_workload_type = navi10_get_workload_type,
        .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
        .set_default_dpm_table = navi10_set_default_dpm_table,
        .dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
+       .i2c_init = navi10_i2c_control_init,
+       .i2c_fini = navi10_i2c_control_fini,
        .print_clk_levels = navi10_print_clk_levels,
        .force_clk_levels = navi10_force_clk_levels,
        .populate_umd_state_clk = navi10_populate_umd_state_clk,
@@ -2376,7 +2525,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
        .fini_microcode = smu_v11_0_fini_microcode,
-       .init_smc_tables = smu_v11_0_init_smc_tables,
+       .init_smc_tables = navi10_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
@@ -2384,15 +2533,18 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .setup_pptable = navi10_setup_pptable,
        .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
        .check_fw_version = smu_v11_0_check_fw_version,
-       .write_pptable = smu_v11_0_write_pptable,
+       .write_pptable = smu_cmn_write_pptable,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .system_features_control = smu_v11_0_system_features_control,
-       .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+       .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+       .send_smc_msg = smu_cmn_send_smc_msg,
        .init_display_count = smu_v11_0_init_display_count,
        .set_allowed_mask = smu_v11_0_set_allowed_mask,
-       .get_enabled_mask = smu_v11_0_get_enabled_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
+       .feature_is_enabled = smu_cmn_feature_is_enabled,
+       .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .notify_display_change = smu_v11_0_notify_display_change,
        .set_power_limit = smu_v11_0_set_power_limit,
        .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2421,9 +2573,17 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .run_btc = navi10_run_btc,
        .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
        .set_power_source = smu_v11_0_set_power_source,
+       .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+       .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &navi10_ppt_funcs;
+       smu->message_map = navi10_message_map;
+       smu->clock_map = navi10_clk_map;
+       smu->feature_map = navi10_feature_mask_map;
+       smu->table_map = navi10_table_map;
+       smu->pwr_src_map = navi10_pwr_src_map;
+       smu->workload_map = navi10_workload_map;
 }
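The per-ASIC navi10_get_smu_*_index() helpers removed above give way to shared lookups driven by the cmn2asic_* tables now attached to smu_context. A minimal sketch of that table-driven lookup pattern; the struct layout, macro and helper below are assumptions for illustration, not the actual smu_cmn implementation.

#include <stdio.h>

/* Assumed shape of a common-to-ASIC mapping entry: a validity flag plus the
 * ASIC-specific index it maps to. */
struct demo_mapping {
	int valid;
	int map_to;
};

#define DEMO_MAP(common, asic)	[common] = { 1, (asic) }

enum { DEMO_MSG_TEST, DEMO_MSG_VERSION, DEMO_MSG_COUNT };

static const struct demo_mapping demo_msg_map[DEMO_MSG_COUNT] = {
	DEMO_MAP(DEMO_MSG_TEST,    0x01),
	DEMO_MAP(DEMO_MSG_VERSION, 0x0e),
};

/* One generic lookup replaces the per-ASIC index getters: bounds-check,
 * reject invalid entries, return the ASIC-specific index. */
static int demo_to_asic_index(const struct demo_mapping *map, int count, int idx)
{
	if (idx < 0 || idx >= count)
		return -1;
	if (!map[idx].valid)
		return -1;
	return map[idx].map_to;
}

int main(void)
{
	printf("VERSION -> 0x%02x\n",
	       (unsigned)demo_to_asic_index(demo_msg_map, DEMO_MSG_COUNT, DEMO_MSG_VERSION));
	return 0;
}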
index 79cadc2..575ae4b 100644 (file)
  *
  */
 
+#define SWSMU_CODE_LAYER_L2
+
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "smu_v12_0_ppsmc.h"
 #include "smu12_driver_if.h"
 #include "smu_v12_0.h"
 #include "renoir_ppt.h"
+#include "smu_cmn.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
 #undef pr_info
 #undef pr_debug
 
-#define CLK_MAP(clk, index) \
-       [SMU_##clk] = {1, (index)}
-
-#define MSG_MAP(msg, index) \
-       [SMU_MSG_##msg] = {1, (index)}
-
-#define TAB_MAP_VALID(tab) \
-       [SMU_TABLE_##tab] = {1, TABLE_##tab}
-
-#define TAB_MAP_INVALID(tab) \
-       [SMU_TABLE_##tab] = {0, TABLE_##tab}
-
-static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
-       MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage),
-       MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion),
-       MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion),
-       MSG_MAP(PowerUpGfx,                     PPSMC_MSG_PowerUpGfx),
-       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_EnableGfxOff),
-       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisableGfxOff),
-       MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile),
-       MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile),
-       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn),
-       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn),
-       MSG_MAP(PowerDownSdma,                  PPSMC_MSG_PowerDownSdma),
-       MSG_MAP(PowerUpSdma,                    PPSMC_MSG_PowerUpSdma),
-       MSG_MAP(SetHardMinIspclkByFreq,         PPSMC_MSG_SetHardMinIspclkByFreq),
-       MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn),
-       MSG_MAP(Spare1,                         PPSMC_MSG_spare1),
-       MSG_MAP(Spare2,                         PPSMC_MSG_spare2),
-       MSG_MAP(SetAllowFclkSwitch,             PPSMC_MSG_SetAllowFclkSwitch),
-       MSG_MAP(SetMinVideoGfxclkFreq,          PPSMC_MSG_SetMinVideoGfxclkFreq),
-       MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify),
-       MSG_MAP(SetCustomPolicy,                PPSMC_MSG_SetCustomPolicy),
-       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps),
-       MSG_MAP(NumOfDisplays,                  PPSMC_MSG_SetDisplayCount),
-       MSG_MAP(QueryPowerLimit,                PPSMC_MSG_QueryPowerLimit),
-       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh),
-       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow),
-       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram),
-       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu),
-       MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset),
-       MSG_MAP(SetGfxclkOverdriveByFreqVid,    PPSMC_MSG_SetGfxclkOverdriveByFreqVid),
-       MSG_MAP(SetHardMinDcfclkByFreq,         PPSMC_MSG_SetHardMinDcfclkByFreq),
-       MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq),
-       MSG_MAP(ControlIgpuATS,                 PPSMC_MSG_ControlIgpuATS),
-       MSG_MAP(SetMinVideoFclkFreq,            PPSMC_MSG_SetMinVideoFclkFreq),
-       MSG_MAP(SetMinDeepSleepDcfclk,          PPSMC_MSG_SetMinDeepSleepDcfclk),
-       MSG_MAP(ForcePowerDownGfx,              PPSMC_MSG_ForcePowerDownGfx),
-       MSG_MAP(SetPhyclkVoltageByFreq,         PPSMC_MSG_SetPhyclkVoltageByFreq),
-       MSG_MAP(SetDppclkVoltageByFreq,         PPSMC_MSG_SetDppclkVoltageByFreq),
-       MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn),
-       MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode),
-       MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency),
-       MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency),
-       MSG_MAP(GetMinGfxclkFrequency,          PPSMC_MSG_GetMinGfxclkFrequency),
-       MSG_MAP(GetMaxGfxclkFrequency,          PPSMC_MSG_GetMaxGfxclkFrequency),
-       MSG_MAP(SoftReset,                      PPSMC_MSG_SoftReset),
-       MSG_MAP(SetGfxCGPG,                     PPSMC_MSG_SetGfxCGPG),
-       MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk),
-       MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk),
-       MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq),
-       MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq),
-       MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn),
-       MSG_MAP(PowerGateMmHub,                 PPSMC_MSG_PowerGateMmHub),
-       MSG_MAP(UpdatePmeRestore,               PPSMC_MSG_UpdatePmeRestore),
-       MSG_MAP(GpuChangeState,                 PPSMC_MSG_GpuChangeState),
-       MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage),
-       MSG_MAP(ForceGfxContentSave,            PPSMC_MSG_ForceGfxContentSave),
-       MSG_MAP(EnableTmdp48MHzRefclkPwrDown,   PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown),
-       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg),
-       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg),
-       MSG_MAP(PowerGateAtHub,                 PPSMC_MSG_PowerGateAtHub),
-       MSG_MAP(SetSoftMinJpeg,                 PPSMC_MSG_SetSoftMinJpeg),
-       MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq),
+static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
+       MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  1),
+       MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                1),
+       MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,           1),
+       MSG_MAP(PowerUpGfx,                     PPSMC_MSG_PowerUpGfx,                   1),
+       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_EnableGfxOff,                 1),
+       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisableGfxOff,                1),
+       MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,           1),
+       MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,             1),
+       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                 1),
+       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                   1),
+       MSG_MAP(PowerDownSdma,                  PPSMC_MSG_PowerDownSdma,                1),
+       MSG_MAP(PowerUpSdma,                    PPSMC_MSG_PowerUpSdma,                  1),
+       MSG_MAP(SetHardMinIspclkByFreq,         PPSMC_MSG_SetHardMinIspclkByFreq,       1),
+       MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,                1),
+       MSG_MAP(Spare1,                         PPSMC_MSG_spare1,                       1),
+       MSG_MAP(Spare2,                         PPSMC_MSG_spare2,                       1),
+       MSG_MAP(SetAllowFclkSwitch,             PPSMC_MSG_SetAllowFclkSwitch,           1),
+       MSG_MAP(SetMinVideoGfxclkFreq,          PPSMC_MSG_SetMinVideoGfxclkFreq,        1),
+       MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,          1),
+       MSG_MAP(SetCustomPolicy,                PPSMC_MSG_SetCustomPolicy,              1),
+       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps,                  1),
+       MSG_MAP(NumOfDisplays,                  PPSMC_MSG_SetDisplayCount,              1),
+       MSG_MAP(QueryPowerLimit,                PPSMC_MSG_QueryPowerLimit,              1),
+       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,        1),
+       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,         1),
+       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,        1),
+       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,        1),
+       MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,         1),
+       MSG_MAP(SetGfxclkOverdriveByFreqVid,    PPSMC_MSG_SetGfxclkOverdriveByFreqVid,  1),
+       MSG_MAP(SetHardMinDcfclkByFreq,         PPSMC_MSG_SetHardMinDcfclkByFreq,       1),
+       MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,       1),
+       MSG_MAP(ControlIgpuATS,                 PPSMC_MSG_ControlIgpuATS,               1),
+       MSG_MAP(SetMinVideoFclkFreq,            PPSMC_MSG_SetMinVideoFclkFreq,          1),
+       MSG_MAP(SetMinDeepSleepDcfclk,          PPSMC_MSG_SetMinDeepSleepDcfclk,        1),
+       MSG_MAP(ForcePowerDownGfx,              PPSMC_MSG_ForcePowerDownGfx,            1),
+       MSG_MAP(SetPhyclkVoltageByFreq,         PPSMC_MSG_SetPhyclkVoltageByFreq,       1),
+       MSG_MAP(SetDppclkVoltageByFreq,         PPSMC_MSG_SetDppclkVoltageByFreq,       1),
+       MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,                1),
+       MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode,               1),
+       MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,           1),
+       MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,             1),
+       MSG_MAP(GetMinGfxclkFrequency,          PPSMC_MSG_GetMinGfxclkFrequency,        1),
+       MSG_MAP(GetMaxGfxclkFrequency,          PPSMC_MSG_GetMaxGfxclkFrequency,        1),
+       MSG_MAP(SoftReset,                      PPSMC_MSG_SoftReset,                    1),
+       MSG_MAP(SetGfxCGPG,                     PPSMC_MSG_SetGfxCGPG,                   1),
+       MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,             1),
+       MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,             1),
+       MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,       1),
+       MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,         1),
+       MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,                1),
+       MSG_MAP(PowerGateMmHub,                 PPSMC_MSG_PowerGateMmHub,               1),
+       MSG_MAP(UpdatePmeRestore,               PPSMC_MSG_UpdatePmeRestore,             1),
+       MSG_MAP(GpuChangeState,                 PPSMC_MSG_GpuChangeState,               1),
+       MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,      1),
+       MSG_MAP(ForceGfxContentSave,            PPSMC_MSG_ForceGfxContentSave,          1),
+       MSG_MAP(EnableTmdp48MHzRefclkPwrDown,   PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown, 1),
+       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,                1),
+       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,                  1),
+       MSG_MAP(PowerGateAtHub,                 PPSMC_MSG_PowerGateAtHub,               1),
+       MSG_MAP(SetSoftMinJpeg,                 PPSMC_MSG_SetSoftMinJpeg,               1),
+       MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,         1),
 };
 
-static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(GFXCLK, CLOCK_GFXCLK),
        CLK_MAP(SCLK,   CLOCK_GFXCLK),
        CLK_MAP(SOCCLK, CLOCK_SOCCLK),
@@ -123,55 +113,20 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(MCLK, CLOCK_FCLK),
 };
 
-static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP_VALID(WATERMARKS),
        TAB_MAP_INVALID(CUSTOM_DPM),
        TAB_MAP_VALID(DPMCLOCKS),
        TAB_MAP_VALID(SMU_METRICS),
 };
 
-static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_12_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_MSG_MAX_COUNT)
-               return -EINVAL;
-
-       mapping = renoir_message_map[index];
-       if (!(mapping.valid_mapping))
-               return -EINVAL;
-
-       return mapping.map_to;
-}
-
-static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_12_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_CLK_COUNT)
-               return -EINVAL;
-
-       mapping = renoir_clk_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_12_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_TABLE_COUNT)
-               return -EINVAL;
-
-       mapping = renoir_table_map[index];
-       if (!(mapping.valid_mapping))
-               return -EINVAL;
-
-       return mapping.map_to;
-}
+static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
+};
 
 static int renoir_get_metrics_table(struct smu_context *smu,
                                    SmuMetrics_t *metrics_table)
@@ -181,7 +136,7 @@ static int renoir_get_metrics_table(struct smu_context *smu,
 
        mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
-               ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+               ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
@@ -197,9 +152,10 @@ static int renoir_get_metrics_table(struct smu_context *smu,
        return ret;
 }
 
-static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int renoir_init_smc_tables(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
 
        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -301,7 +257,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
        uint32_t mclk_mask, soc_mask;
        uint32_t clock_limit;
 
-       if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
                switch (clk_type) {
                case SMU_MCLK:
                case SMU_UCLK:
@@ -340,7 +296,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
                switch (clk_type) {
                case SMU_GFXCLK:
                case SMU_SCLK:
-                       ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
                        if (ret) {
                                dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n");
                                goto failed;
@@ -368,7 +324,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
                switch (clk_type) {
                case SMU_GFXCLK:
                case SMU_SCLK:
-                       ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
                        if (ret) {
                                dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n");
                                goto failed;
@@ -509,15 +465,15 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
                        if (ret)
                                return ret;
                }
                power_gate->vcn_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
@@ -534,15 +490,15 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
        int ret = 0;
 
        if (enable) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
                power_gate->jpeg_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
@@ -563,7 +519,9 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
        if (ret)
                return ret;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
@@ -615,7 +573,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
        };
 
        for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
-               if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+               if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
                    continue;
 
                clk_type = clk_feature_map[i].clk_type;
@@ -676,35 +634,6 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
        return 0;
 }
 
-static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
-{
-
-       uint32_t  pplib_workload = 0;
-
-       switch (profile) {
-       case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
-               pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
-               break;
-       case PP_SMC_POWER_PROFILE_CUSTOM:
-               pplib_workload = WORKLOAD_PPLIB_COUNT;
-               break;
-       case PP_SMC_POWER_PROFILE_VIDEO:
-               pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
-               break;
-       case PP_SMC_POWER_PROFILE_VR:
-               pplib_workload = WORKLOAD_PPLIB_VR_BIT;
-               break;
-       case PP_SMC_POWER_PROFILE_COMPUTE:
-               pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return pplib_workload;
-}
-
-
 /**
  * This interface gets the dpm clock table for dc
  */
@@ -760,13 +689,13 @@ static int renoir_force_clk_levels(struct smu_context *smu,
                ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
                                        soft_max_level == 0 ? min_freq :
                                        soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
                                        NULL);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
                                        soft_min_level == 2 ? max_freq :
                                        soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
                                        NULL);
@@ -780,10 +709,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
                ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
                if (ret)
                        return ret;
                break;
@@ -795,10 +724,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
                ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
                if (ret)
                        return ret;
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
                if (ret)
                        return ret;
                break;
@@ -820,7 +749,9 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-       workload_type = smu_workload_get_type(smu, profile_mode);
+       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                      CMN2ASIC_MAPPING_WORKLOAD,
+                                                      profile_mode);
        if (workload_type < 0) {
                /*
                 * TODO: If some case need switch to powersave/default power mode
@@ -830,7 +761,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
                return -EINVAL;
        }
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
                                    1 << workload_type,
                                    NULL);
        if (ret) {
@@ -912,60 +843,59 @@ static int renoir_set_performance_level(struct smu_context *smu,
  */
 static int renoir_set_watermarks_table(
                struct smu_context *smu,
-               void *watermarks,
                struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-       int i;
+       Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
-       Watermarks_t *table = watermarks;
+       int i;
 
-       if (!table || !clock_ranges)
-               return -EINVAL;
+       if (clock_ranges) {
+               if (clock_ranges->num_wm_dmif_sets > 4 ||
+                               clock_ranges->num_wm_mcif_sets > 4)
+                       return -EINVAL;
 
-       if (clock_ranges->num_wm_dmif_sets > 4 ||
-                       clock_ranges->num_wm_mcif_sets > 4)
-               return -EINVAL;
+               /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+                       table->WatermarkRow[WM_DCFCLK][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+                       table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+                       table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+                       table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+                       table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               }
 
-       /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
-       for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-               table->WatermarkRow[WM_DCFCLK][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
-               table->WatermarkRow[WM_DCFCLK][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
-               table->WatermarkRow[WM_DCFCLK][i].MinMclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
-               table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
-               table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
-       }
+               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+                       table->WatermarkRow[WM_SOCCLK][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+                       table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+                       table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+                       table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+                       table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               }
 
-       for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-               table->WatermarkRow[WM_SOCCLK][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
-               table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
-               table->WatermarkRow[WM_SOCCLK][i].MinMclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
-               table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
-               table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               smu->watermarks_bitmap |= WATERMARKS_EXIST;
        }
 
-       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
        /* pass data to smu controller */
-       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
+       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_cmn_write_watermarks_table(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to update WMTABLE!");
                        return ret;
@@ -998,7 +928,9 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
                 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
                 * Not all profile modes are supported on renoir.
                 */
-               workload_type = smu_workload_get_type(smu, i);
+               workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              i);
                if (workload_type < 0)
                        continue;
 
@@ -1064,16 +996,11 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
 }
 
 static const struct pptable_funcs renoir_ppt_funcs = {
-       .get_smu_msg_index = renoir_get_smu_msg_index,
-       .get_smu_clk_index = renoir_get_smu_clk_index,
-       .get_smu_table_index = renoir_get_smu_table_index,
-       .tables_init = renoir_tables_init,
        .set_power_state = NULL,
        .print_clk_levels = renoir_print_clk_levels,
        .get_current_power_state = renoir_get_current_power_state,
        .dpm_set_vcn_enable = renoir_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
-       .get_workload_type = renoir_get_workload_type,
        .force_clk_levels = renoir_force_clk_levels,
        .set_power_profile_mode = renoir_set_power_profile_mode,
        .set_performance_level = renoir_set_performance_level,
@@ -1084,23 +1011,33 @@ static const struct pptable_funcs renoir_ppt_funcs = {
        .check_fw_status = smu_v12_0_check_fw_status,
        .check_fw_version = smu_v12_0_check_fw_version,
        .powergate_sdma = smu_v12_0_powergate_sdma,
-       .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+       .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+       .send_smc_msg = smu_cmn_send_smc_msg,
        .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
        .gfx_off_control = smu_v12_0_gfx_off_control,
-       .init_smc_tables = smu_v12_0_init_smc_tables,
+       .get_gfx_off_status = smu_v12_0_get_gfxoff_status,
+       .init_smc_tables = renoir_init_smc_tables,
        .fini_smc_tables = smu_v12_0_fini_smc_tables,
        .set_default_dpm_table = smu_v12_0_set_default_dpm_tables,
-       .get_enabled_mask = smu_v12_0_get_enabled_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
+       .feature_is_enabled = smu_cmn_feature_is_enabled,
+       .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .get_dpm_ultimate_freq = renoir_get_dpm_ultimate_freq,
        .mode2_reset = smu_v12_0_mode2_reset,
        .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
        .set_driver_table_location = smu_v12_0_set_driver_table_location,
        .is_dpm_running = renoir_is_dpm_running,
+       .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+       .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &renoir_ppt_funcs;
+       smu->message_map = renoir_message_map;
+       smu->clock_map = renoir_clk_map;
+       smu->table_map = renoir_table_map;
+       smu->workload_map = renoir_workload_map;
        smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
        smu->is_apu = true;
 }
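With the function pointers wired up above, both message forms route through the smu_cmn_* helpers. The parameter-less smu_cmn_send_smc_msg() is presumably just a thin wrapper over the with-parameter variant; a sketch of the assumed relationship (the real helper lives in smu_cmn.c, outside this diff):

/*
 * Assumed shape of the no-argument helper: reuse the with-parameter path
 * with a parameter of 0 and pass any readback pointer straight through.
 */
static int example_send_smc_msg(struct smu_context *smu,
				enum smu_message_type msg,
				uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu, msg, 0, read_arg);
}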
index 5faef41..59da3ca 100644
  *
  */
 
+#define SWSMU_CODE_LAYER_L2
+
 #include <linux/firmware.h>
 #include <linux/pci.h>
+#include <linux/i2c.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_sienna_cichlid.h"
 #include "soc15_common.h"
@@ -42,6 +45,9 @@
 #include "mp/mp_11_0_offset.h"
 #include "mp/mp_11_0_sh_mask.h"
 
+#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
+
 /*
  * DO NOT use these for err/warn/info/debug messages.
  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
@@ -52,6 +58,8 @@
 #undef pr_info
 #undef pr_debug
 
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
        FEATURE_MASK(FEATURE_DPM_FCLK_BIT)       | \
        FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
 
-#define MSG_MAP(msg, index) \
-       [SMU_MSG_##msg] = {1, (index)}
-
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
-       MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage),
-       MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion),
-       MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion),
-       MSG_MAP(SetAllowedFeaturesMaskLow,      PPSMC_MSG_SetAllowedFeaturesMaskLow),
-       MSG_MAP(SetAllowedFeaturesMaskHigh,     PPSMC_MSG_SetAllowedFeaturesMaskHigh),
-       MSG_MAP(EnableAllSmuFeatures,           PPSMC_MSG_EnableAllSmuFeatures),
-       MSG_MAP(DisableAllSmuFeatures,          PPSMC_MSG_DisableAllSmuFeatures),
-       MSG_MAP(EnableSmuFeaturesLow,           PPSMC_MSG_EnableSmuFeaturesLow),
-       MSG_MAP(EnableSmuFeaturesHigh,          PPSMC_MSG_EnableSmuFeaturesHigh),
-       MSG_MAP(DisableSmuFeaturesLow,          PPSMC_MSG_DisableSmuFeaturesLow),
-       MSG_MAP(DisableSmuFeaturesHigh,         PPSMC_MSG_DisableSmuFeaturesHigh),
-       MSG_MAP(GetEnabledSmuFeaturesLow,       PPSMC_MSG_GetRunningSmuFeaturesLow),
-       MSG_MAP(GetEnabledSmuFeaturesHigh,      PPSMC_MSG_GetRunningSmuFeaturesHigh),
-       MSG_MAP(SetWorkloadMask,                PPSMC_MSG_SetWorkloadMask),
-       MSG_MAP(SetPptLimit,                    PPSMC_MSG_SetPptLimit),
-       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh),
-       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow),
-       MSG_MAP(SetToolsDramAddrHigh,           PPSMC_MSG_SetToolsDramAddrHigh),
-       MSG_MAP(SetToolsDramAddrLow,            PPSMC_MSG_SetToolsDramAddrLow),
-       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram),
-       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu),
-       MSG_MAP(UseDefaultPPTable,              PPSMC_MSG_UseDefaultPPTable),
-       MSG_MAP(EnterBaco,                      PPSMC_MSG_EnterBaco),
-       MSG_MAP(SetSoftMinByFreq,               PPSMC_MSG_SetSoftMinByFreq),
-       MSG_MAP(SetSoftMaxByFreq,               PPSMC_MSG_SetSoftMaxByFreq),
-       MSG_MAP(SetHardMinByFreq,               PPSMC_MSG_SetHardMinByFreq),
-       MSG_MAP(SetHardMaxByFreq,               PPSMC_MSG_SetHardMaxByFreq),
-       MSG_MAP(GetMinDpmFreq,                  PPSMC_MSG_GetMinDpmFreq),
-       MSG_MAP(GetMaxDpmFreq,                  PPSMC_MSG_GetMaxDpmFreq),
-       MSG_MAP(GetDpmFreqByIndex,              PPSMC_MSG_GetDpmFreqByIndex),
-       MSG_MAP(SetGeminiMode,                  PPSMC_MSG_SetGeminiMode),
-       MSG_MAP(SetGeminiApertureHigh,          PPSMC_MSG_SetGeminiApertureHigh),
-       MSG_MAP(SetGeminiApertureLow,           PPSMC_MSG_SetGeminiApertureLow),
-       MSG_MAP(OverridePcieParameters,         PPSMC_MSG_OverridePcieParameters),
-       MSG_MAP(ReenableAcDcInterrupt,          PPSMC_MSG_ReenableAcDcInterrupt),
-       MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource),
-       MSG_MAP(SetUclkFastSwitch,              PPSMC_MSG_SetUclkFastSwitch),
-       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps),
-       MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload),
-       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff),
-       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff),
-       MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit),
-       MSG_MAP(GetDcModeMaxDpmFreq,            PPSMC_MSG_GetDcModeMaxDpmFreq),
-       MSG_MAP(ExitBaco,                       PPSMC_MSG_ExitBaco),
-       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn),
-       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn),
-       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg),
-       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg),
-       MSG_MAP(BacoAudioD3PME,                 PPSMC_MSG_BacoAudioD3PME),
-       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3),
-       MSG_MAP(Mode1Reset,                     PPSMC_MSG_Mode1Reset),
+static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
+       MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                 1),
+       MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,               1),
+       MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,          1),
+       MSG_MAP(SetAllowedFeaturesMaskLow,      PPSMC_MSG_SetAllowedFeaturesMaskLow,   1),
+       MSG_MAP(SetAllowedFeaturesMaskHigh,     PPSMC_MSG_SetAllowedFeaturesMaskHigh,  1),
+       MSG_MAP(EnableAllSmuFeatures,           PPSMC_MSG_EnableAllSmuFeatures,        1),
+       MSG_MAP(DisableAllSmuFeatures,          PPSMC_MSG_DisableAllSmuFeatures,       1),
+       MSG_MAP(EnableSmuFeaturesLow,           PPSMC_MSG_EnableSmuFeaturesLow,        1),
+       MSG_MAP(EnableSmuFeaturesHigh,          PPSMC_MSG_EnableSmuFeaturesHigh,       1),
+       MSG_MAP(DisableSmuFeaturesLow,          PPSMC_MSG_DisableSmuFeaturesLow,       1),
+       MSG_MAP(DisableSmuFeaturesHigh,         PPSMC_MSG_DisableSmuFeaturesHigh,      1),
+       MSG_MAP(GetEnabledSmuFeaturesLow,       PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
+       MSG_MAP(GetEnabledSmuFeaturesHigh,      PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
+       MSG_MAP(SetWorkloadMask,                PPSMC_MSG_SetWorkloadMask,             1),
+       MSG_MAP(SetPptLimit,                    PPSMC_MSG_SetPptLimit,                 1),
+       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,       1),
+       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,        1),
+       MSG_MAP(SetToolsDramAddrHigh,           PPSMC_MSG_SetToolsDramAddrHigh,        1),
+       MSG_MAP(SetToolsDramAddrLow,            PPSMC_MSG_SetToolsDramAddrLow,         1),
+       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,       1),
+       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,       1),
+       MSG_MAP(UseDefaultPPTable,              PPSMC_MSG_UseDefaultPPTable,           1),
+       MSG_MAP(EnterBaco,                      PPSMC_MSG_EnterBaco,                   1),
+       MSG_MAP(SetSoftMinByFreq,               PPSMC_MSG_SetSoftMinByFreq,            1),
+       MSG_MAP(SetSoftMaxByFreq,               PPSMC_MSG_SetSoftMaxByFreq,            1),
+       MSG_MAP(SetHardMinByFreq,               PPSMC_MSG_SetHardMinByFreq,            1),
+       MSG_MAP(SetHardMaxByFreq,               PPSMC_MSG_SetHardMaxByFreq,            1),
+       MSG_MAP(GetMinDpmFreq,                  PPSMC_MSG_GetMinDpmFreq,               1),
+       MSG_MAP(GetMaxDpmFreq,                  PPSMC_MSG_GetMaxDpmFreq,               1),
+       MSG_MAP(GetDpmFreqByIndex,              PPSMC_MSG_GetDpmFreqByIndex,           1),
+       MSG_MAP(SetGeminiMode,                  PPSMC_MSG_SetGeminiMode,               1),
+       MSG_MAP(SetGeminiApertureHigh,          PPSMC_MSG_SetGeminiApertureHigh,       1),
+       MSG_MAP(SetGeminiApertureLow,           PPSMC_MSG_SetGeminiApertureLow,        1),
+       MSG_MAP(OverridePcieParameters,         PPSMC_MSG_OverridePcieParameters,      1),
+       MSG_MAP(ReenableAcDcInterrupt,          PPSMC_MSG_ReenableAcDcInterrupt,       1),
+       MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           1),
+       MSG_MAP(SetUclkFastSwitch,              PPSMC_MSG_SetUclkFastSwitch,           1),
+       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps,                 1),
+       MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         1),
+       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,                 1),
+       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,              1),
+       MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 1),
+       MSG_MAP(GetDcModeMaxDpmFreq,            PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
+       MSG_MAP(ExitBaco,                       PPSMC_MSG_ExitBaco,                    1),
+       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                  1),
+       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                1),
+       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,                 1),
+       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,               1),
+       MSG_MAP(BacoAudioD3PME,                 PPSMC_MSG_BacoAudioD3PME,              1),
+       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       1),
 };
 
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(GFXCLK,         PPCLK_GFXCLK),
        CLK_MAP(SCLK,           PPCLK_GFXCLK),
        CLK_MAP(SOCCLK,         PPCLK_SOCCLK),
@@ -127,8 +131,8 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] =
        CLK_MAP(UCLK,           PPCLK_UCLK),
        CLK_MAP(MCLK,           PPCLK_UCLK),
        CLK_MAP(DCLK,           PPCLK_DCLK_0),
-       CLK_MAP(DCLK1,          PPCLK_DCLK_0),
-       CLK_MAP(VCLK,           PPCLK_VCLK_1),
+       CLK_MAP(DCLK1,          PPCLK_DCLK_1),
+       CLK_MAP(VCLK,           PPCLK_VCLK_0),
        CLK_MAP(VCLK1,          PPCLK_VCLK_1),
        CLK_MAP(DCEFCLK,        PPCLK_DCEFCLK),
        CLK_MAP(DISPCLK,        PPCLK_DISPCLK),
@@ -136,7 +140,7 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] =
        CLK_MAP(PHYCLK,         PPCLK_PHYCLK),
 };
 
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(DPM_PREFETCHER),
        FEA_MAP(DPM_GFXCLK),
        FEA_MAP(DPM_GFX_GPO),
@@ -179,7 +183,7 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_feature_mask_map[SMU_FEAT
        FEA_MAP(APCC_DFLL),
 };
 
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PPTABLE),
        TAB_MAP(WATERMARKS),
        TAB_MAP(AVFS_PSM_DEBUG),
@@ -193,12 +197,12 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT
        TAB_MAP(PACE),
 };
 
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
        PWR_MAP(AC),
        PWR_MAP(DC),
 };
 
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,       WORKLOAD_PPLIB_DEFAULT_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -208,96 +212,6 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_workload_map[PP_SMC_POWER
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
-static int sienna_cichlid_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_MSG_MAX_COUNT)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_message_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_CLK_COUNT)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_clk_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_FEATURE_COUNT)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_feature_mask_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_TABLE_COUNT)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_table_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int sienna_cichlid_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_POWER_SOURCE_COUNT)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_pwr_src_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int sienna_cichlid_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
-               return -EINVAL;
-
-       mapping = sienna_cichlid_workload_map[profile];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
 static int
 sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
                                  uint32_t *feature_mask, uint32_t num)
@@ -399,7 +313,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            smc_dpm_info);
 
-       ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+       ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
                                      (uint8_t **)&smc_dpm_table);
        if (ret)
                return ret;
@@ -445,9 +359,10 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
        return ret;
 }
 
-static int sienna_cichlid_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int sienna_cichlid_tables_init(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
 
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -455,6 +370,8 @@ static int sienna_cichlid_tables_init(struct smu_context *smu, struct smu_table
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
@@ -486,7 +403,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
        mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time ||
             time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_SMU_METRICS,
                                       0,
                                       smu_table->metrics_table,
@@ -583,9 +500,6 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
-       if (smu_dpm->dpm_context)
-               return -EINVAL;
-
        smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                       GFP_KERNEL);
        if (!smu_dpm->dpm_context)
@@ -596,16 +510,32 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
        return 0;
 }
 
+static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = sienna_cichlid_tables_init(smu);
+       if (ret)
+               return ret;
+
+       ret = sienna_cichlid_allocate_dpm_context(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
 static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 {
        struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
        PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
        struct smu_11_0_dpm_table *dpm_table;
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
        /* socclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.soc_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_SOCCLK,
                                                     dpm_table);
@@ -623,7 +553,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* gfxclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.gfx_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_GFXCLK,
                                                     dpm_table);
@@ -641,7 +571,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* uclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.uclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_UCLK,
                                                     dpm_table);
@@ -659,7 +589,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* fclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.fclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_FCLK,
                                                     dpm_table);
@@ -677,7 +607,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* vclk0 dpm table setup */
        dpm_table = &dpm_context->dpm_tables.vclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_VCLK,
                                                     dpm_table);
@@ -694,26 +624,29 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
        }
 
        /* vclk1 dpm table setup */
-       dpm_table = &dpm_context->dpm_tables.vclk1_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-               ret = smu_v11_0_set_single_dpm_table(smu,
-                                                    SMU_VCLK1,
-                                                    dpm_table);
-               if (ret)
-                       return ret;
-               dpm_table->is_fine_grained =
-                       !driver_ppt->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete;
-       } else {
-               dpm_table->count = 1;
-               dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
-               dpm_table->dpm_levels[0].enabled = true;
-               dpm_table->min = dpm_table->dpm_levels[0].value;
-               dpm_table->max = dpm_table->dpm_levels[0].value;
+       if (adev->vcn.num_vcn_inst > 1) {
+               dpm_table = &dpm_context->dpm_tables.vclk1_table;
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_v11_0_set_single_dpm_table(smu,
+                                                            SMU_VCLK1,
+                                                            dpm_table);
+                       if (ret)
+                               return ret;
+                       dpm_table->is_fine_grained =
+                               !driver_ppt->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete;
+               } else {
+                       dpm_table->count = 1;
+                       dpm_table->dpm_levels[0].value =
+                               smu->smu_table.boot_values.vclk / 100;
+                       dpm_table->dpm_levels[0].enabled = true;
+                       dpm_table->min = dpm_table->dpm_levels[0].value;
+                       dpm_table->max = dpm_table->dpm_levels[0].value;
+               }
        }
 
        /* dclk0 dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCLK,
                                                     dpm_table);
@@ -730,26 +663,29 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
        }
 
        /* dclk1 dpm table setup */
-       dpm_table = &dpm_context->dpm_tables.dclk1_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-               ret = smu_v11_0_set_single_dpm_table(smu,
-                                                    SMU_DCLK1,
-                                                    dpm_table);
-               if (ret)
-                       return ret;
-               dpm_table->is_fine_grained =
-                       !driver_ppt->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete;
-       } else {
-               dpm_table->count = 1;
-               dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
-               dpm_table->dpm_levels[0].enabled = true;
-               dpm_table->min = dpm_table->dpm_levels[0].value;
-               dpm_table->max = dpm_table->dpm_levels[0].value;
+       if (adev->vcn.num_vcn_inst > 1) {
+               dpm_table = &dpm_context->dpm_tables.dclk1_table;
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_v11_0_set_single_dpm_table(smu,
+                                                            SMU_DCLK1,
+                                                            dpm_table);
+                       if (ret)
+                               return ret;
+                       dpm_table->is_fine_grained =
+                               !driver_ppt->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete;
+               } else {
+                       dpm_table->count = 1;
+                       dpm_table->dpm_levels[0].value =
+                               smu->smu_table.boot_values.dclk / 100;
+                       dpm_table->dpm_levels[0].enabled = true;
+                       dpm_table->min = dpm_table->dpm_levels[0].value;
+                       dpm_table->max = dpm_table->dpm_levels[0].value;
+               }
        }
 
        /* dcefclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dcef_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCEFCLK,
                                                     dpm_table);
@@ -767,7 +703,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* pixelclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.pixel_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PIXCLK,
                                                     dpm_table);
@@ -785,7 +721,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* displayclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.display_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DISPCLK,
                                                     dpm_table);
@@ -803,7 +739,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
        /* phyclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.phy_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PHYCLK,
                                                     dpm_table);
@@ -832,12 +768,12 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
 
        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
                        if (ret)
                                return ret;
                        if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-                               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
+                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
                                                                  0x10000, NULL);
                                if (ret)
                                        return ret;
@@ -845,12 +781,12 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
                }
                power_gate->vcn_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
                        if (ret)
                                return ret;
                        if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-                               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
+                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
                                                                  0x10000, NULL);
                                if (ret)
                                        return ret;
@@ -869,15 +805,15 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
        int ret = 0;
 
        if (enable) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
                power_gate->jpeg_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
@@ -894,7 +830,9 @@ static int sienna_cichlid_get_current_clk_freq_by_table(struct smu_context *smu,
        MetricsMember_t member_type;
        int clk_id = 0;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
@@ -942,7 +880,9 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
        DpmDescriptor_t *dpm_desc = NULL;
        uint32_t clk_index = 0;
 
-       clk_index = smu_clk_get_index(smu, clk_type);
+       clk_index = smu_cmn_to_asic_specific_index(smu,
+                                                  CMN2ASIC_MAPPING_CLK,
+                                                  clk_type);
        dpm_desc = &pptable->DpmDescriptor[clk_index];
 
        /* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -1131,12 +1071,12 @@ static int sienna_cichlid_pre_display_config_changed(struct smu_context *smu)
        /* Sienna_Cichlid does not support changing the display num currently */
        return 0;
 #if 0
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
        if (ret)
                return ret;
 #endif
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
                if (ret)
                        return ret;
@@ -1153,10 +1093,10 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 #if 0
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                                  smu->display_config->num_display,
                                                  NULL);
 #endif
@@ -1210,7 +1150,7 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
        unsigned long feature_enabled;
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
        feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
                           ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1281,11 +1221,13 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
                /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-               workload_type = smu_workload_get_type(smu, i);
+               workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              i);
                if (workload_type < 0)
                        return -EINVAL;
 
-               result = smu_update_table(smu,
+               result = smu_cmn_update_table(smu,
                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
                                          (void *)(&activity_monitor), false);
                if (result) {
@@ -1356,7 +1298,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
 
        if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), false);
                if (ret) {
@@ -1400,7 +1342,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
                        break;
                }
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), true);
                if (ret) {
@@ -1410,10 +1352,12 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
        }
 
        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-       workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                      CMN2ASIC_MAPPING_WORKLOAD,
+                                                      smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
-       smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+       smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
                                    1 << workload_type, NULL);
 
        return ret;
@@ -1429,14 +1373,14 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
        min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
 
-       if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                clock_req.clock_type = amd_pp_dcef_clock;
                clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
 
                ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
                if (!ret) {
-                       if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
-                               ret = smu_send_smc_msg_with_param(smu,
+                       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+                               ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                  SMU_MSG_SetMinDeepSleepDcefclk,
                                                                  min_clocks.dcef_clock_in_sr/100,
                                                                  NULL);
@@ -1450,7 +1394,7 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
                if (ret) {
                        dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
@@ -1462,67 +1406,66 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
 }
 
 static int sienna_cichlid_set_watermarks_table(struct smu_context *smu,
-                                      void *watermarks, struct
-                                      dm_pp_wm_sets_with_clock_ranges_soc15
-                                      *clock_ranges)
+                                              struct dm_pp_wm_sets_with_clock_ranges_soc15
+                                              *clock_ranges)
 {
-       int i;
+       Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
-       Watermarks_t *table = watermarks;
+       int i;
 
-       if (!table || !clock_ranges)
-               return -EINVAL;
+       if (clock_ranges) {
+               if (clock_ranges->num_wm_dmif_sets > 4 ||
+                   clock_ranges->num_wm_mcif_sets > 4)
+                       return -EINVAL;
+
+               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+                       table->WatermarkRow[1][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               }
+
+               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+                       table->WatermarkRow[0][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               }
 
-       if (clock_ranges->num_wm_dmif_sets > 4 ||
-           clock_ranges->num_wm_mcif_sets > 4)
-                return -EINVAL;
-
-        for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-               table->WatermarkRow[1][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
-        }
-
-       for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-               table->WatermarkRow[0][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
-        }
-
-       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
-       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
+               smu->watermarks_bitmap |= WATERMARKS_EXIST;
+       }
+
+       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_cmn_write_watermarks_table(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to update WMTABLE!");
                        return ret;
@@ -1759,7 +1702,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                                        pptable->PcieLaneCount[i] :
                                        pcie_width_cap);
 
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg,
                                          NULL);
@@ -1814,7 +1757,7 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
         * SRIOV env will not support SMU mode1 reset
         * PM FW support mode1 reset from 58.26
         */
-       smu_get_smc_version(smu, NULL, &smu_version);
+       smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (amdgpu_sriov_vf(adev) || (smu_version < 0x003a1a00))
                return false;
 
@@ -2487,19 +2430,245 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
        dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
 }
 
+static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t  *req, bool write,
+                                 uint8_t address, uint32_t numbytes,
+                                 uint8_t *data)
+{
+       int i;
+
+       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+       req->I2CcontrollerPort = 0;
+       req->I2CSpeed = 2;
+       req->SlaveAddress = address;
+       req->NumCmds = numbytes;
+
+       for (i = 0; i < numbytes; i++) {
+               SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];
+
+               /* First 2 bytes are always writes, carrying the 2-byte EEPROM address */
+               if (i < 2)
+                       cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+               else
+                       cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+
+               /* Add RESTART for a read once the address has been filled */
+               cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+               /* Add STOP in the end */
+               cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+               /* Fill with data regardless of read or write to simplify the code */
+               cmd->ReadWriteData = data[i];
+       }
+}
+
+static int sienna_cichlid_i2c_read_data(struct i2c_adapter *control,
+                                              uint8_t address,
+                                              uint8_t *data,
+                                              uint32_t numbytes)
+{
+       uint32_t  i, ret = 0;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct smu_table *table = &smu_table->driver_table;
+
+       memset(&req, 0, sizeof(req));
+       sienna_cichlid_fill_i2c_req(&req, false, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       /* Now read data starting with that address */
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+                                       true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+               /* Assume SMU fills res.SwI2cCmds[i].ReadWriteData with the read bytes */
+               for (i = 0; i < numbytes; i++)
+                       data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+               dev_dbg(adev->dev, "sienna_cichlid_i2c_read_data, address = %x, bytes = %d, data :",
+                                 (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+       } else
+               dev_err(adev->dev, "sienna_cichlid_i2c_read_data - error occurred: %x", ret);
+
+       return ret;
+}
+
+static int sienna_cichlid_i2c_write_data(struct i2c_adapter *control,
+                                               uint8_t address,
+                                               uint8_t *data,
+                                               uint32_t numbytes)
+{
+       uint32_t ret;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       memset(&req, 0, sizeof(req));
+       sienna_cichlid_fill_i2c_req(&req, true, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               dev_dbg(adev->dev, "sienna_cichlid_i2c_write(), address = %x, bytes = %d , data: ",
+                                        (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+               /*
+                * According to the EEPROM spec, up to 10 ms is required for the
+                * EEPROM to flush its internal RX buffer after a STOP is issued at
+                * the end of a write transaction. During this time the EEPROM will
+                * not respond to any further commands - so wait a bit more.
+                */
+               msleep(10);
+
+       } else
+               dev_err(adev->dev, "sienna_cichlid_i2c_write - error occurred: %x", ret);
+
+       return ret;
+}
+
+static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
+                             struct i2c_msg *msgs, int num)
+{
+       uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+       uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+       for (i = 0; i < num; i++) {
+               /*
+                * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data
+                * at once, so the data needs to be split into chunks, with each chunk
+                * sent separately.
+                */
+               data_size = msgs[i].len - 2;
+               data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+               next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+               data_ptr = msgs[i].buf + 2;
+
+               for (j = 0; j < data_size / data_chunk_size; j++) {
+                       /* Insert the EEPROM destination address, bits 0-15 */
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = sienna_cichlid_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
+
+                               memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+                       } else {
+
+                               memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+                               ret = sienna_cichlid_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+
+                       next_eeprom_addr += data_chunk_size;
+                       data_ptr += data_chunk_size;
+               }
+
+               if (data_size % data_chunk_size) {
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = sienna_cichlid_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
+
+                               memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+                       } else {
+                               memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+                               ret = sienna_cichlid_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+fail:
+       return num;
+}
+
+static u32 sienna_cichlid_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
+       .master_xfer = sienna_cichlid_i2c_xfer,
+       .functionality = sienna_cichlid_i2c_func,
+};
+
+static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       return control->dev.parent == &adev->pdev->dev;
+}
+
+static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
+
+       /* smu_i2c_eeprom_init may be called twice under SRIOV */
+       if (sienna_cichlid_i2c_adapter_is_added(control))
+               return 0;
+
+       control->owner = THIS_MODULE;
+       control->class = I2C_CLASS_SPD;
+       control->dev.parent = &adev->pdev->dev;
+       control->algo = &sienna_cichlid_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+       res = i2c_add_adapter(control);
+       if (res)
+               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+       return res;
+}
+
+static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+       if (!sienna_cichlid_i2c_adapter_is_added(control))
+               return;
+
+       i2c_del_adapter(control);
+}
+
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
-       .tables_init = sienna_cichlid_tables_init,
-       .alloc_dpm_context = sienna_cichlid_allocate_dpm_context,
-       .get_smu_msg_index = sienna_cichlid_get_smu_msg_index,
-       .get_smu_clk_index = sienna_cichlid_get_smu_clk_index,
-       .get_smu_feature_index = sienna_cichlid_get_smu_feature_index,
-       .get_smu_table_index = sienna_cichlid_get_smu_table_index,
-       .get_smu_power_index = sienna_cichlid_get_pwr_src_index,
-       .get_workload_type = sienna_cichlid_get_workload_type,
        .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
        .set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
        .dpm_set_vcn_enable = sienna_cichlid_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = sienna_cichlid_dpm_set_jpeg_enable,
+       .i2c_init = sienna_cichlid_i2c_control_init,
+       .i2c_fini = sienna_cichlid_i2c_control_fini,
        .print_clk_levels = sienna_cichlid_print_clk_levels,
        .force_clk_levels = sienna_cichlid_force_clk_levels,
        .populate_umd_state_clk = sienna_cichlid_populate_umd_state_clk,
@@ -2522,7 +2691,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .dump_pptable = sienna_cichlid_dump_pptable,
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
-       .init_smc_tables = smu_v11_0_init_smc_tables,
+       .init_smc_tables = sienna_cichlid_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
@@ -2530,15 +2699,18 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .setup_pptable = sienna_cichlid_setup_pptable,
        .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
        .check_fw_version = smu_v11_0_check_fw_version,
-       .write_pptable = smu_v11_0_write_pptable,
+       .write_pptable = smu_cmn_write_pptable,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .system_features_control = smu_v11_0_system_features_control,
-       .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+       .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+       .send_smc_msg = smu_cmn_send_smc_msg,
        .init_display_count = NULL,
        .set_allowed_mask = smu_v11_0_set_allowed_mask,
-       .get_enabled_mask = smu_v11_0_get_enabled_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
+       .feature_is_enabled = smu_cmn_feature_is_enabled,
+       .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .notify_display_change = NULL,
        .set_power_limit = smu_v11_0_set_power_limit,
        .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2564,9 +2736,17 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .mode1_reset = smu_v11_0_mode1_reset,
        .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
        .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+       .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+       .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &sienna_cichlid_ppt_funcs;
+       smu->message_map = sienna_cichlid_message_map;
+       smu->clock_map = sienna_cichlid_clk_map;
+       smu->feature_map = sienna_cichlid_feature_mask_map;
+       smu->table_map = sienna_cichlid_table_map;
+       smu->pwr_src_map = sienna_cichlid_pwr_src_map;
+       smu->workload_map = sienna_cichlid_workload_map;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/powerplay/smu_cmn.c
new file mode 100644 (file)
index 0000000..be4b678
--- /dev/null
@@ -0,0 +1,633 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define SWSMU_CODE_LAYER_L4
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_cmn.h"
+#include "soc15_common.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+/*
+ * Although these are defined in each ASIC's specific header file,
+ * they share the same definitions and values. That makes common
+ * APIs for issuing SMC messages to all ASICs possible.
+ */
+#define mmMP1_SMN_C2PMSG_66                                                                            0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0
+
+#define mmMP1_SMN_C2PMSG_82                                                                            0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0
+
+#define mmMP1_SMN_C2PMSG_90                                                                            0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0
+
+#define MP1_C2PMSG_90__CONTENT_MASK                                                                    0xFFFFFFFFL
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type)  #type
+static const char* __smu_message_names[] = {
+       SMU_MESSAGE_TYPES
+};
+
+static const char *smu_get_message_name(struct smu_context *smu,
+                                       enum smu_message_type type)
+{
+       if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+               return "unknown smu message";
+
+       return __smu_message_names[type];
+}
+
+static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+                                            uint16_t msg)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+}
+
+static void smu_cmn_read_arg(struct smu_context *smu,
+                            uint32_t *arg)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
+
+static int smu_cmn_wait_for_response(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+
+       for (i = 0; i < timeout; i++) {
+               cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+               if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+                       return cur_value == 0x1 ? 0 : -EIO;
+
+               udelay(1);
+       }
+
+       /* timeout means wrong logic */
+       if (i == timeout)
+               return -ETIME;
+
+       return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+                                   enum smu_message_type msg,
+                                   uint32_t param,
+                                   uint32_t *read_arg)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0, index = 0;
+
+       index = smu_cmn_to_asic_specific_index(smu,
+                                              CMN2ASIC_MAPPING_MSG,
+                                              msg);
+       if (index < 0)
+               return index == -EACCES ? 0 : index;
+
+       mutex_lock(&smu->message_lock);
+       ret = smu_cmn_wait_for_response(smu);
+       if (ret) {
+               dev_err(adev->dev, "Msg issuing pre-check failed and "
+                      "SMU may not be in the right state!\n");
+               goto out;
+       }
+
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+       smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
+
+       ret = smu_cmn_wait_for_response(smu);
+       if (ret) {
+               dev_err(adev->dev, "failed to send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+                      smu_get_message_name(smu, msg), index, param, ret);
+               goto out;
+       }
+
+       if (read_arg)
+               smu_cmn_read_arg(smu, read_arg);
+
+out:
+       mutex_unlock(&smu->message_lock);
+       return ret;
+}
+
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+                        enum smu_message_type msg,
+                        uint32_t *read_arg)
+{
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              msg,
+                                              0,
+                                              read_arg);
+}
+
+int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+                                  enum smu_cmn2asic_mapping_type type,
+                                  uint32_t index)
+{
+       struct cmn2asic_msg_mapping msg_mapping;
+       struct cmn2asic_mapping mapping;
+
+       switch (type) {
+       case CMN2ASIC_MAPPING_MSG:
+               if (index > SMU_MSG_MAX_COUNT ||
+                   !smu->message_map)
+                       return -EINVAL;
+
+               msg_mapping = smu->message_map[index];
+               if (!msg_mapping.valid_mapping)
+                       return -EINVAL;
+
+               if (amdgpu_sriov_vf(smu->adev) &&
+                   !msg_mapping.valid_in_vf)
+                       return -EACCES;
+
+               return msg_mapping.map_to;
+
+       case CMN2ASIC_MAPPING_CLK:
+               if (index > SMU_CLK_COUNT ||
+                   !smu->clock_map)
+                       return -EINVAL;
+
+               mapping = smu->clock_map[index];
+               if (!mapping.valid_mapping)
+                       return -EINVAL;
+
+               return mapping.map_to;
+
+       case CMN2ASIC_MAPPING_FEATURE:
+               if (index > SMU_FEATURE_COUNT ||
+                   !smu->feature_map)
+                       return -EINVAL;
+
+               mapping = smu->feature_map[index];
+               if (!mapping.valid_mapping)
+                       return -EINVAL;
+
+               return mapping.map_to;
+
+       case CMN2ASIC_MAPPING_TABLE:
+               if (index > SMU_TABLE_COUNT ||
+                   !smu->table_map)
+                       return -EINVAL;
+
+               mapping = smu->table_map[index];
+               if (!mapping.valid_mapping)
+                       return -EINVAL;
+
+               return mapping.map_to;
+
+       case CMN2ASIC_MAPPING_PWR:
+               if (index > SMU_POWER_SOURCE_COUNT ||
+                   !smu->pwr_src_map)
+                       return -EINVAL;
+
+               mapping = smu->pwr_src_map[index];
+               if (!mapping.valid_mapping)
+                       return -EINVAL;
+
+               return mapping.map_to;
+
+       case CMN2ASIC_MAPPING_WORKLOAD:
+               if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
+                   !smu->workload_map)
+                       return -EINVAL;
+
+               mapping = smu->workload_map[index];
+               if (!mapping.valid_mapping)
+                       return -EINVAL;
+
+               return mapping.map_to;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+int smu_cmn_feature_is_supported(struct smu_context *smu,
+                                enum smu_feature_mask mask)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int feature_id;
+       int ret = 0;
+
+       feature_id = smu_cmn_to_asic_specific_index(smu,
+                                                   CMN2ASIC_MAPPING_FEATURE,
+                                                   mask);
+       if (feature_id < 0)
+               return 0;
+
+       WARN_ON(feature_id > feature->feature_num);
+
+       mutex_lock(&feature->mutex);
+       ret = test_bit(feature_id, feature->supported);
+       mutex_unlock(&feature->mutex);
+
+       return ret;
+}
+
+int smu_cmn_feature_is_enabled(struct smu_context *smu,
+                              enum smu_feature_mask mask)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int feature_id;
+       int ret = 0;
+
+       if (smu->is_apu)
+               return 1;
+       feature_id = smu_cmn_to_asic_specific_index(smu,
+                                                   CMN2ASIC_MAPPING_FEATURE,
+                                                   mask);
+       if (feature_id < 0)
+               return 0;
+
+       WARN_ON(feature_id > feature->feature_num);
+
+       mutex_lock(&feature->mutex);
+       ret = test_bit(feature_id, feature->enabled);
+       mutex_unlock(&feature->mutex);
+
+       return ret;
+}
+
+bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
+                               enum smu_clk_type clk_type)
+{
+       enum smu_feature_mask feature_id = 0;
+
+       switch (clk_type) {
+       case SMU_MCLK:
+       case SMU_UCLK:
+               feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+               break;
+       case SMU_SOCCLK:
+               feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+               break;
+       default:
+               return true;
+       }
+
+       if (!smu_cmn_feature_is_enabled(smu, feature_id))
+               return false;
+
+       return true;
+}
+
+int smu_cmn_get_enabled_mask(struct smu_context *smu,
+                            uint32_t *feature_mask,
+                            uint32_t num)
+{
+       uint32_t feature_mask_high = 0, feature_mask_low = 0;
+       struct smu_feature *feature = &smu->smu_feature;
+       int ret = 0;
+
+       if (!feature_mask || num < 2)
+               return -EINVAL;
+
+       if (bitmap_empty(feature->enabled, feature->feature_num)) {
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
+               if (ret)
+                       return ret;
+
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
+               if (ret)
+                       return ret;
+
+               feature_mask[0] = feature_mask_low;
+               feature_mask[1] = feature_mask_high;
+       } else {
+               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+                            feature->feature_num);
+       }
+
+       return ret;
+}
+
+static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+                                              uint64_t feature_mask,
+                                              bool enabled)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int ret = 0;
+
+       if (enabled) {
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                 SMU_MSG_EnableSmuFeaturesLow,
+                                                 lower_32_bits(feature_mask),
+                                                 NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                 SMU_MSG_EnableSmuFeaturesHigh,
+                                                 upper_32_bits(feature_mask),
+                                                 NULL);
+               if (ret)
+                       return ret;
+       } else {
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                 SMU_MSG_DisableSmuFeaturesLow,
+                                                 lower_32_bits(feature_mask),
+                                                 NULL);
+               if (ret)
+                       return ret;
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                 SMU_MSG_DisableSmuFeaturesHigh,
+                                                 upper_32_bits(feature_mask),
+                                                 NULL);
+               if (ret)
+                       return ret;
+       }
+
+       mutex_lock(&feature->mutex);
+       if (enabled)
+               bitmap_or(feature->enabled, feature->enabled,
+                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+       else
+               bitmap_andnot(feature->enabled, feature->enabled,
+                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+       mutex_unlock(&feature->mutex);
+
+       return ret;
+}
+
+int smu_cmn_feature_set_enabled(struct smu_context *smu,
+                               enum smu_feature_mask mask,
+                               bool enable)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int feature_id;
+
+       feature_id = smu_cmn_to_asic_specific_index(smu,
+                                                   CMN2ASIC_MAPPING_FEATURE,
+                                                   mask);
+       if (feature_id < 0)
+               return -EINVAL;
+
+       WARN_ON(feature_id > feature->feature_num);
+
+       return smu_cmn_feature_update_enable_state(smu,
+                                              1ULL << feature_id,
+                                              enable);
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea)   #fea
+static const char* __smu_feature_names[] = {
+       SMU_FEATURE_MASKS
+};
+
+static const char *smu_get_feature_name(struct smu_context *smu,
+                                       enum smu_feature_mask feature)
+{
+       if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+               return "unknown smu feature";
+       return __smu_feature_names[feature];
+}
+
+size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
+                                  char *buf)
+{
+       uint32_t feature_mask[2] = { 0 };
+       int32_t feature_index = 0;
+       uint32_t count = 0;
+       uint32_t sort_feature[SMU_FEATURE_COUNT];
+       uint64_t hw_feature_count = 0;
+       size_t size = 0;
+       int ret = 0, i;
+
+       ret = smu_cmn_get_enabled_mask(smu,
+                                      feature_mask,
+                                      2);
+       if (ret)
+               return 0;
+
+       size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+                       feature_mask[1], feature_mask[0]);
+
+       for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+               feature_index = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_FEATURE,
+                                                              i);
+               if (feature_index < 0)
+                       continue;
+               sort_feature[feature_index] = i;
+               hw_feature_count++;
+       }
+
+       for (i = 0; i < hw_feature_count; i++) {
+               size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+                              count++,
+                              smu_get_feature_name(smu, sort_feature[i]),
+                              i,
+                              !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+                              "enabled" : "disabled");
+       }
+
+       return size;
+}
+
+int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
+                               uint64_t new_mask)
+{
+       int ret = 0;
+       uint32_t feature_mask[2] = { 0 };
+       uint64_t feature_2_enabled = 0;
+       uint64_t feature_2_disabled = 0;
+       uint64_t feature_enables = 0;
+
+       ret = smu_cmn_get_enabled_mask(smu,
+                                      feature_mask,
+                                      2);
+       if (ret)
+               return ret;
+
+       feature_enables = ((uint64_t)feature_mask[1] << 32 |
+                          (uint64_t)feature_mask[0]);
+
+       feature_2_enabled  = ~feature_enables & new_mask;
+       feature_2_disabled = feature_enables & ~new_mask;
+
+       if (feature_2_enabled) {
+               ret = smu_cmn_feature_update_enable_state(smu,
+                                                         feature_2_enabled,
+                                                         true);
+               if (ret)
+                       return ret;
+       }
+       if (feature_2_disabled) {
+               ret = smu_cmn_feature_update_enable_state(smu,
+                                                         feature_2_disabled,
+                                                         false);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+                                               enum smu_feature_mask mask)
+{
+       uint64_t features_to_disable = U64_MAX;
+       int skipped_feature_id;
+
+       skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
+                                                           CMN2ASIC_MAPPING_FEATURE,
+                                                           mask);
+       if (skipped_feature_id < 0)
+               return -EINVAL;
+
+       features_to_disable &= ~(1ULL << skipped_feature_id);
+
+       return smu_cmn_feature_update_enable_state(smu,
+                                                  features_to_disable,
+                                                  0);
+}
+
+int smu_cmn_get_smc_version(struct smu_context *smu,
+                           uint32_t *if_version,
+                           uint32_t *smu_version)
+{
+       int ret = 0;
+
+       if (!if_version && !smu_version)
+               return -EINVAL;
+
+       if (smu->smc_fw_if_version && smu->smc_fw_version)
+       {
+               if (if_version)
+                       *if_version = smu->smc_fw_if_version;
+
+               if (smu_version)
+                       *smu_version = smu->smc_fw_version;
+
+               return 0;
+       }
+
+       if (if_version) {
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
+               if (ret)
+                       return ret;
+
+               smu->smc_fw_if_version = *if_version;
+       }
+
+       if (smu_version) {
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
+               if (ret)
+                       return ret;
+
+               smu->smc_fw_version = *smu_version;
+       }
+
+       return ret;
+}
+
+int smu_cmn_update_table(struct smu_context *smu,
+                        enum smu_table_id table_index,
+                        int argument,
+                        void *table_data,
+                        bool drv2smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
+       struct smu_table *table = &smu_table->driver_table;
+       int table_id = smu_cmn_to_asic_specific_index(smu,
+                                                     CMN2ASIC_MAPPING_TABLE,
+                                                     table_index);
+       uint32_t table_size;
+       int ret = 0;
+       if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+               return -EINVAL;
+
+       table_size = smu_table->tables[table_index].size;
+
+       if (drv2smu) {
+               memcpy(table->cpu_addr, table_data, table_size);
+               /*
+                * Flush the HDP cache to ensure the content seen by the
+                * GPU is consistent with the CPU.
+                */
+               amdgpu_asic_flush_hdp(adev, NULL);
+       }
+
+       ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
+                                         SMU_MSG_TransferTableDram2Smu :
+                                         SMU_MSG_TransferTableSmu2Dram,
+                                         table_id | ((argument & 0xFFFF) << 16),
+                                         NULL);
+       if (ret)
+               return ret;
+
+       if (!drv2smu) {
+               amdgpu_asic_flush_hdp(adev, NULL);
+               memcpy(table_data, table->cpu_addr, table_size);
+       }
+
+       return ret;
+}
+
+int smu_cmn_write_watermarks_table(struct smu_context *smu)
+{
+       void *watermarks_table = smu->smu_table.watermarks_table;
+
+       if (!watermarks_table)
+               return -EINVAL;
+
+       return smu_cmn_update_table(smu,
+                                   SMU_TABLE_WATERMARKS,
+                                   0,
+                                   watermarks_table,
+                                   true);
+}
+
+int smu_cmn_write_pptable(struct smu_context *smu)
+{
+       void *pptable = smu->smu_table.driver_pptable;
+
+       return smu_cmn_update_table(smu,
+                                   SMU_TABLE_PPTABLE,
+                                   0,
+                                   pptable,
+                                   true);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.h b/drivers/gpu/drm/amd/powerplay/smu_cmn.h
new file mode 100644 (file)
index 0000000..98face8
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_CMN_H__
+#define __SMU_CMN_H__
+
+#include "amdgpu_smu.h"
+
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+                                   enum smu_message_type msg,
+                                   uint32_t param,
+                                   uint32_t *read_arg);
+
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+                        enum smu_message_type msg,
+                        uint32_t *read_arg);
+
+int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+                                  enum smu_cmn2asic_mapping_type type,
+                                  uint32_t index);
+
+int smu_cmn_feature_is_supported(struct smu_context *smu,
+                                enum smu_feature_mask mask);
+
+int smu_cmn_feature_is_enabled(struct smu_context *smu,
+                              enum smu_feature_mask mask);
+
+bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
+                               enum smu_clk_type clk_type);
+
+int smu_cmn_get_enabled_mask(struct smu_context *smu,
+                            uint32_t *feature_mask,
+                            uint32_t num);
+
+int smu_cmn_feature_set_enabled(struct smu_context *smu,
+                               enum smu_feature_mask mask,
+                               bool enable);
+
+size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
+                                  char *buf);
+
+int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
+                               uint64_t new_mask);
+
+int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+                                               enum smu_feature_mask mask);
+
+int smu_cmn_get_smc_version(struct smu_context *smu,
+                           uint32_t *if_version,
+                           uint32_t *smu_version);
+
+int smu_cmn_update_table(struct smu_context *smu,
+                        enum smu_table_id table_index,
+                        int argument,
+                        void *table_data,
+                        bool drv2smu);
+
+int smu_cmn_write_watermarks_table(struct smu_context *smu);
+
+int smu_cmn_write_pptable(struct smu_context *smu);
+
+#endif
+#endif
index afb3ef8..d0deaef 100644 (file)
@@ -25,6 +25,8 @@
 
 #include "amdgpu_smu.h"
 
+#if defined(SWSMU_CODE_LAYER_L1)
+
 #define smu_ppt_funcs(intf, ret, smu, args...) \
        ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? (smu)->ppt_funcs->intf(smu, ##args) : ret) : -EINVAL)
 
 #define smu_set_tool_table_location(smu)                               smu_ppt_funcs(set_tool_table_location, 0, smu)
 #define smu_notify_memory_pool_location(smu)                           smu_ppt_funcs(notify_memory_pool_location, 0, smu)
 #define smu_gfx_off_control(smu, enable)                               smu_ppt_funcs(gfx_off_control, 0, smu, enable)
+#define smu_get_gfx_off_status(smu)                                            smu_ppt_funcs(get_gfx_off_status, 0, smu)
 #define smu_set_last_dcef_min_deep_sleep_clk(smu)                      smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
 #define smu_system_features_control(smu, en)                           smu_ppt_funcs(system_features_control, 0, smu, en)
 #define smu_init_max_sustainable_clocks(smu)                           smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
 #define smu_set_default_od_settings(smu)                               smu_ppt_funcs(set_default_od_settings, 0, smu)
 #define smu_send_smc_msg_with_param(smu, msg, param, read_arg)         smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, param, read_arg)
-#define smu_send_smc_msg(smu, msg, read_arg)                           smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, 0, read_arg)
-#define smu_alloc_dpm_context(smu)                                     smu_ppt_funcs(alloc_dpm_context, 0, smu)
+#define smu_send_smc_msg(smu, msg, read_arg)                           smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
 #define smu_init_display_count(smu, count)                             smu_ppt_funcs(init_display_count, 0, smu, count)
 #define smu_feature_set_allowed_mask(smu)                              smu_ppt_funcs(set_allowed_mask, 0, smu)
 #define smu_feature_get_enabled_mask(smu, mask, num)                   smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
+#define smu_feature_is_enabled(smu, mask)                              smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
+#define smu_disable_all_features_with_exception(smu, mask)             smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
 #define smu_is_dpm_running(smu)                                                smu_ppt_funcs(is_dpm_running, 0 , smu)
 #define smu_notify_display_change(smu)                                 smu_ppt_funcs(notify_display_change, 0, smu)
 #define smu_set_default_dpm_table(smu)                                 smu_ppt_funcs(set_default_dpm_table, 0, smu)
 #define smu_populate_umd_state_clk(smu)                                        smu_ppt_funcs(populate_umd_state_clk, 0, smu)
 #define smu_set_default_od8_settings(smu)                              smu_ppt_funcs(set_default_od8_settings, 0, smu)
-#define smu_tables_init(smu, tab)                                      smu_ppt_funcs(tables_init, 0, smu, tab)
 #define smu_enable_thermal_alert(smu)                                  smu_ppt_funcs(enable_thermal_alert, 0, smu)
 #define smu_disable_thermal_alert(smu)                                 smu_ppt_funcs(disable_thermal_alert, 0, smu)
 #define smu_smc_read_sensor(smu, sensor, data, size)                   smu_ppt_funcs(read_sensor, -EINVAL, smu, sensor, data, size)
 #define smu_apply_clocks_adjust_rules(smu)                             smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
 #define smu_notify_smc_display_config(smu)                             smu_ppt_funcs(notify_smc_display_config, 0, smu)
 #define smu_set_cpu_power_state(smu)                                   smu_ppt_funcs(set_cpu_power_state, 0, smu)
-#define smu_msg_get_index(smu, msg)                                    smu_ppt_funcs(get_smu_msg_index, -EINVAL, smu, msg)
-#define smu_clk_get_index(smu, clk)                                    smu_ppt_funcs(get_smu_clk_index, -EINVAL, smu, clk)
-#define smu_feature_get_index(smu, fea)                                        smu_ppt_funcs(get_smu_feature_index, -EINVAL, smu, fea)
-#define smu_table_get_index(smu, tab)                                  smu_ppt_funcs(get_smu_table_index, -EINVAL, smu, tab)
-#define smu_power_get_index(smu, src)                                  smu_ppt_funcs(get_smu_power_index, -EINVAL, smu, src)
-#define smu_workload_get_type(smu, type)                               smu_ppt_funcs(get_workload_type, -EINVAL, smu, type)
 #define smu_run_btc(smu)                                               smu_ppt_funcs(run_btc, 0, smu)
 #define smu_get_allowed_feature_mask(smu, feature_mask, num)           smu_ppt_funcs(get_allowed_feature_mask, 0, smu, feature_mask, num)
 #define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis)      smu_ppt_funcs(store_cc6_data, 0, smu, st, cc6_dis, pst_dis, pst_sw_dis)
@@ -82,7 +79,7 @@
 #define smu_get_current_shallow_sleep_clocks(smu, clocks)              smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
 #define smu_dpm_set_vcn_enable(smu, enable)                            smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
 #define smu_dpm_set_jpeg_enable(smu, enable)                           smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
-#define smu_set_watermarks_table(smu, tab, clock_ranges)               smu_ppt_funcs(set_watermarks_table, 0, smu, tab, clock_ranges)
+#define smu_set_watermarks_table(smu, clock_ranges)                    smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
 #define smu_thermal_temperature_range_update(smu, range, rw)           smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
 #define smu_register_irq_handler(smu)                                  smu_ppt_funcs(register_irq_handler, 0, smu)
 #define smu_get_dpm_ultimate_freq(smu, param, min, max)                        smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max)
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap)  smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
 #define smu_disable_umc_cdr_12gbps_workaround(smu)                     smu_ppt_funcs(disable_umc_cdr_12gbps_workaround, 0, smu)
 #define smu_set_power_source(smu, power_src)                           smu_ppt_funcs(set_power_source, 0, smu, power_src)
-#define smu_i2c_eeprom_init(smu, control)                              smu_ppt_funcs(i2c_eeprom_init, 0, smu, control)
-#define smu_i2c_eeprom_fini(smu, control)                              smu_ppt_funcs(i2c_eeprom_fini, 0, smu, control)
+#define smu_i2c_init(smu, control)                                     smu_ppt_funcs(i2c_init, 0, smu, control)
+#define smu_i2c_fini(smu, control)                                     smu_ppt_funcs(i2c_fini, 0, smu, control)
 #define smu_get_unique_id(smu)                                         smu_ppt_funcs(get_unique_id, 0, smu)
 #define smu_log_thermal_throttling(smu)                                        smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
 #define smu_get_asic_power_limits(smu)                                 smu_ppt_funcs(get_power_limit, 0, smu)
+#define smu_get_pp_feature_mask(smu, buf)                              smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
+#define smu_set_pp_feature_mask(smu, new_mask)                         smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
 
 #endif
+#endif
index a733655..fd82402 100644 (file)
 #include <linux/reboot.h>
 
 #define SMU_11_0_PARTIAL_PPTABLE
+#define SWSMU_CODE_LAYER_L3
 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
 #include "smu_v11_0.h"
 #include "soc15_common.h"
 #include "atom.h"
 #include "amdgpu_ras.h"
+#include "smu_cmn.h"
 
 #include "asic_reg/thm/thm_11_0_2_offset.h"
 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -65,89 +67,6 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
 
 #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
 
-static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
-                                             uint16_t msg)
-{
-       struct amdgpu_device *adev = smu->adev;
-       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-       return 0;
-}
-
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
-       return 0;
-}
-
-static int smu_v11_0_wait_for_response(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-       uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
-
-       for (i = 0; i < timeout; i++) {
-               cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
-               if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-                       return cur_value == 0x1 ? 0 : -EIO;
-
-               udelay(1);
-       }
-
-       /* timeout means wrong logic */
-       if (i == timeout)
-               return -ETIME;
-
-       return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
-                             enum smu_message_type msg,
-                             uint32_t param,
-                             uint32_t *read_arg)
-{
-       struct amdgpu_device *adev = smu->adev;
-       int ret = 0, index = 0;
-
-       index = smu_msg_get_index(smu, msg);
-       if (index < 0)
-               return index == -EACCES ? 0 : index;
-
-       mutex_lock(&smu->message_lock);
-       ret = smu_v11_0_wait_for_response(smu);
-       if (ret) {
-               dev_err(adev->dev, "Msg issuing pre-check failed and "
-                      "SMU may be not in the right state!\n");
-               goto out;
-       }
-
-       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
-       smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
-       ret = smu_v11_0_wait_for_response(smu);
-       if (ret) {
-               dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-                      smu_get_message_name(smu, msg), index, param, ret);
-               goto out;
-       }
-
-       if (read_arg) {
-               ret = smu_v11_0_read_arg(smu, read_arg);
-               if (ret) {
-                       dev_err(adev->dev, "failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
-                              smu_get_message_name(smu, msg), index, param, ret);
-                       goto out;
-               }
-       }
-out:
-       mutex_unlock(&smu->message_lock);
-       return ret;
-}
-
 int smu_v11_0_init_microcode(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
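
Note: the message helpers deleted in the hunk above were consolidated into the shared smu_cmn layer (hence the smu_cmn_send_smc_msg* calls later in this patch). Condensed from the removed code, using its register names and wait helper, the MP1 mailbox handshake looks roughly like this; an illustrative sketch, not the verbatim smu_cmn implementation:

/*
 * Sketch of the MP1 mailbox handshake condensed from the helpers removed
 * above: wait for any previous response, clear the response register, write
 * the argument and message index, then poll for the acknowledgement and
 * optionally read back the result.
 */
static int example_send_msg(struct smu_context *smu, uint16_t index,
			    uint32_t param, uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_v11_0_wait_for_response(smu);			/* pre-check */
	if (ret)
		return ret;

	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);		/* clear response */
	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);	/* argument */
	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, index);	/* message index */

	ret = smu_v11_0_wait_for_response(smu);			/* wait for ack */
	if (!ret && read_arg)
		*read_arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);

	return ret;
}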
@@ -285,7 +204,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
        uint8_t smu_minor, smu_debug;
        int ret = 0;
 
-       ret = smu_get_smc_version(smu, &if_version, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;
 
@@ -416,7 +335,7 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                    powerplayinfo);
 
-               ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
+               ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
                                              (uint8_t **)&table);
                if (ret)
                        return ret;
@@ -431,70 +350,24 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
        return 0;
 }
 
-static int smu_v11_0_init_dpm_context(struct smu_context *smu)
-{
-       struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
-       if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
-               return -EINVAL;
-
-       return smu_alloc_dpm_context(smu);
-}
-
-static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
-{
-       struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
-       if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
-               return -EINVAL;
-
-       kfree(smu_dpm->dpm_context);
-       kfree(smu_dpm->golden_dpm_context);
-       kfree(smu_dpm->dpm_current_power_state);
-       kfree(smu_dpm->dpm_request_power_state);
-       smu_dpm->dpm_context = NULL;
-       smu_dpm->golden_dpm_context = NULL;
-       smu_dpm->dpm_context_size = 0;
-       smu_dpm->dpm_current_power_state = NULL;
-       smu_dpm->dpm_request_power_state = NULL;
-
-       return 0;
-}
-
 int smu_v11_0_init_smc_tables(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct smu_table *tables = NULL;
+       struct smu_table *tables = smu_table->tables;
        int ret = 0;
 
-       tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
-                        GFP_KERNEL);
-       if (!tables) {
-               ret = -ENOMEM;
-               goto err0_out;
-       }
-       smu_table->tables = tables;
-
-       ret = smu_tables_init(smu, tables);
-       if (ret)
-               goto err1_out;
-
-       ret = smu_v11_0_init_dpm_context(smu);
-       if (ret)
-               goto err1_out;
-
        smu_table->driver_pptable =
                kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
        if (!smu_table->driver_pptable) {
                ret = -ENOMEM;
-               goto err2_out;
+               goto err0_out;
        }
 
        smu_table->max_sustainable_clocks =
                kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
        if (!smu_table->max_sustainable_clocks) {
                ret = -ENOMEM;
-               goto err3_out;
+               goto err1_out;
        }
 
        /* Arcturus does not support OVERDRIVE */
@@ -503,29 +376,25 @@ int smu_v11_0_init_smc_tables(struct smu_context *smu)
                        kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
                if (!smu_table->overdrive_table) {
                        ret = -ENOMEM;
-                       goto err4_out;
+                       goto err2_out;
                }
 
                smu_table->boot_overdrive_table =
                        kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
                if (!smu_table->boot_overdrive_table) {
                        ret = -ENOMEM;
-                       goto err5_out;
+                       goto err3_out;
                }
        }
 
        return 0;
 
-err5_out:
-       kfree(smu_table->overdrive_table);
-err4_out:
-       kfree(smu_table->max_sustainable_clocks);
 err3_out:
-       kfree(smu_table->driver_pptable);
+       kfree(smu_table->overdrive_table);
 err2_out:
-       smu_v11_0_fini_dpm_context(smu);
+       kfree(smu_table->max_sustainable_clocks);
 err1_out:
-       kfree(tables);
+       kfree(smu_table->driver_pptable);
 err0_out:
        return ret;
 }
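
Note: the relabelled error paths above follow the usual kernel goto-unwind idiom: each label frees exactly what was successfully allocated before the failure, in reverse order of allocation. A generic, self-contained illustration (not amdgpu code):

/* Generic illustration of the reverse-order goto unwind used above. */
struct two_bufs {
	void *a;
	void *b;
};

static int two_bufs_init(struct two_bufs *t)
{
	int ret = 0;

	t->a = kzalloc(64, GFP_KERNEL);
	if (!t->a) {
		ret = -ENOMEM;
		goto err0_out;
	}

	t->b = kzalloc(64, GFP_KERNEL);
	if (!t->b) {
		ret = -ENOMEM;
		goto err1_out;		/* only 'a' needs freeing */
	}

	return 0;

err1_out:
	kfree(t->a);
err0_out:
	return ret;
}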
@@ -533,10 +402,7 @@ err0_out:
 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       int ret = 0;
-
-       if (!smu_table->tables)
-               return -EINVAL;
+       struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
        kfree(smu_table->boot_overdrive_table);
        kfree(smu_table->overdrive_table);
@@ -549,17 +415,22 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->hardcode_pptable);
        smu_table->hardcode_pptable = NULL;
 
-       kfree(smu_table->tables);
        kfree(smu_table->metrics_table);
        kfree(smu_table->watermarks_table);
-       smu_table->tables = NULL;
        smu_table->metrics_table = NULL;
        smu_table->watermarks_table = NULL;
        smu_table->metrics_time = 0;
 
-       ret = smu_v11_0_fini_dpm_context(smu);
-       if (ret)
-               return ret;
+       kfree(smu_dpm->dpm_context);
+       kfree(smu_dpm->golden_dpm_context);
+       kfree(smu_dpm->dpm_current_power_state);
+       kfree(smu_dpm->dpm_request_power_state);
+       smu_dpm->dpm_context = NULL;
+       smu_dpm->golden_dpm_context = NULL;
+       smu_dpm->dpm_context_size = 0;
+       smu_dpm->dpm_current_power_state = NULL;
+       smu_dpm->dpm_request_power_state = NULL;
+
        return 0;
 }
 
@@ -631,7 +502,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            firmwareinfo);
 
-       ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+       ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
                                      (uint8_t **)&header);
        if (ret)
                return ret;
@@ -727,13 +598,13 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
        address_high = (uint32_t)upper_32_bits(address);
        address_low  = (uint32_t)lower_32_bits(address);
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetSystemVirtualDramAddrHigh,
                                          address_high,
                                          NULL);
        if (ret)
                return ret;
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetSystemVirtualDramAddrLow,
                                          address_low,
                                          NULL);
@@ -744,15 +615,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
        address_high = (uint32_t)upper_32_bits(address);
        address_low  = (uint32_t)lower_32_bits(address);
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
                                          address_high, NULL);
        if (ret)
                return ret;
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
                                          address_low, NULL);
        if (ret)
                return ret;
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
                                          (uint32_t)memory_pool->size, NULL);
        if (ret)
                return ret;
@@ -760,22 +631,11 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
        return ret;
 }
 
-int smu_v11_0_write_pptable(struct smu_context *smu)
-{
-       struct smu_table_context *table_context = &smu->smu_table;
-       int ret = 0;
-
-       ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
-                              table_context->driver_pptable, true);
-
-       return ret;
-}
-
 int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 {
        int ret;
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
        if (ret)
                dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -789,12 +649,12 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
        int ret = 0;
 
        if (driver_table->mc_address) {
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrHigh,
                                upper_32_bits(driver_table->mc_address),
                                NULL);
                if (!ret)
-                       ret = smu_send_smc_msg_with_param(smu,
+                       ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrLow,
                                lower_32_bits(driver_table->mc_address),
                                NULL);
@@ -809,12 +669,12 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
 
        if (tool_table->mc_address) {
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrHigh,
                                upper_32_bits(tool_table->mc_address),
                                NULL);
                if (!ret)
-                       ret = smu_send_smc_msg_with_param(smu,
+                       ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrLow,
                                lower_32_bits(tool_table->mc_address),
                                NULL);
@@ -835,7 +695,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
        if (!smu->pm_enabled)
                return ret;
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
        return ret;
 }
 
@@ -852,12 +712,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
 
        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                          feature_mask[1], NULL);
        if (ret)
                goto failed;
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
                                          feature_mask[0], NULL);
        if (ret)
                goto failed;
@@ -867,35 +727,6 @@ failed:
        return ret;
 }
 
-int smu_v11_0_get_enabled_mask(struct smu_context *smu,
-                                     uint32_t *feature_mask, uint32_t num)
-{
-       uint32_t feature_mask_high = 0, feature_mask_low = 0;
-       struct smu_feature *feature = &smu->smu_feature;
-       int ret = 0;
-
-       if (!feature_mask || num < 2)
-               return -EINVAL;
-
-       if (bitmap_empty(feature->enabled, feature->feature_num)) {
-               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
-               if (ret)
-                       return ret;
-
-               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
-               if (ret)
-                       return ret;
-
-               feature_mask[0] = feature_mask_low;
-               feature_mask[1] = feature_mask_high;
-       } else {
-               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
-                            feature->feature_num);
-       }
-
-       return ret;
-}
-
 int smu_v11_0_system_features_control(struct smu_context *smu,
                                             bool en)
 {
@@ -903,7 +734,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
        uint32_t feature_mask[2];
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+       ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
                                     SMU_MSG_DisableAllSmuFeatures), NULL);
        if (ret)
                return ret;
@@ -912,7 +743,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
        bitmap_zero(feature->supported, feature->feature_num);
 
        if (en) {
-               ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+               ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
                if (ret)
                        return ret;
 
@@ -932,9 +763,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
        if (!smu->pm_enabled)
                return ret;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
 
        return ret;
 }
@@ -946,15 +777,17 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
        int ret = 0;
        int clk_id;
 
-       if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
-           (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+       if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+           (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
                return 0;
 
-       clk_id = smu_clk_get_index(smu, clock_select);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clock_select);
        if (clk_id < 0)
                return -EINVAL;
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
                                          clk_id << 16, clock);
        if (ret) {
                dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
@@ -965,7 +798,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
                return 0;
 
        /* if DC limit is zero, return AC limit */
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
                                          clk_id << 16, clock);
        if (ret) {
                dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
@@ -988,7 +821,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
        max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
        max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->uclock),
                                                          SMU_UCLK);
@@ -999,7 +832,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->soc_clock),
                                                          SMU_SOCCLK);
@@ -1010,7 +843,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->dcef_clock),
                                                          SMU_DCEFCLK);
@@ -1058,17 +891,18 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
        int power_src;
        int ret = 0;
 
-       if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
                return -EINVAL;
 
-       power_src = smu_power_get_index(smu,
+       power_src = smu_cmn_to_asic_specific_index(smu,
+                                       CMN2ASIC_MAPPING_PWR,
                                        smu->adev->pm.ac_power ?
                                        SMU_POWER_SOURCE_AC :
                                        SMU_POWER_SOURCE_DC);
        if (power_src < 0)
                return -EINVAL;
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_GetPptLimit,
                                          power_src << 16,
                                          power_limit);
@@ -1082,12 +916,12 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
 {
        int ret = 0;
 
-       if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
                dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
                return -EOPNOTSUPP;
        }
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
        if (ret) {
                dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
                return ret;
@@ -1145,8 +979,8 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
        enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
-               smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
+               smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
                        clk_select = SMU_DCEFCLK;
@@ -1198,9 +1032,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
                if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                        return 0;
                if (enable)
-                       ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
                else
-                       ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
                break;
        default:
                break;
@@ -1212,7 +1046,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
 uint32_t
 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
 {
-       if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+       if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return AMD_FAN_CTRL_MANUAL;
        else
                return AMD_FAN_CTRL_AUTO;
@@ -1223,10 +1057,10 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
 {
        int ret = 0;
 
-       if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+       if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return 0;
 
-       ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
+       ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
        if (ret)
                dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
                       __func__, (auto_fan_control ? "Start" : "Stop"));
@@ -1336,7 +1170,7 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
                                     uint32_t pstate)
 {
        int ret = 0;
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetXgmiMode,
                                          pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
                                          NULL);
@@ -1410,7 +1244,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
 
 static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
 {
-       return smu_send_smc_msg(smu,
+       return smu_cmn_send_smc_msg(smu,
                                SMU_MSG_ReenableAcDcInterrupt,
                                NULL);
 }
@@ -1568,14 +1402,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
 
        return ret;
 }
 
 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
 {
-       return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
+       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
 }
 
 bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1591,8 +1425,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
                return false;
 
        /* Arcturus does not support this bit mask */
-       if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
-          !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+          !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
 
        return true;
@@ -1629,21 +1463,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                        data |= 0x80000000;
                        WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
 
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
                } else {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
                }
        } else {
-               ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
                if (ret)
                        goto out;
 
-               if (ras && ras->supported) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
-                       if (ret)
-                               goto out;
-               }
-
                /* clear vbios scratch 6 and 7 for coming asic reinit */
                WREG32(adev->bios_scratch_reg_offset + 6, 0);
                WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@ -1693,7 +1521,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
        if (!ret)
                msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
 
@@ -1707,7 +1535,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
        uint32_t param = 0;
        uint32_t clock_limit;
 
-       if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
                switch (clk_type) {
                case SMU_MCLK:
                case SMU_UCLK:
@@ -1734,7 +1562,9 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
                return 0;
        }
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0) {
                ret = -EINVAL;
                goto failed;
@@ -1742,13 +1572,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
        param = (clk_id & 0xffff) << 16;
 
        if (max) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
                if (ret)
                        goto failed;
        }
 
        if (min) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
                if (ret)
                        goto failed;
        }
@@ -1766,7 +1596,12 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
        int ret = 0, clk_id = 0;
        uint32_t param;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
@@ -1775,7 +1610,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 
        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
                                                  param, NULL);
                if (ret)
                        goto out;
@@ -1783,7 +1618,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 
        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
                                                  param, NULL);
                if (ret)
                        goto out;
@@ -1807,16 +1642,18 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
        if (min <= 0 && max <= 0)
                return -EINVAL;
 
-       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
                return 0;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
                                                  param, NULL);
                if (ret)
                        return ret;
@@ -1824,7 +1661,7 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
 
        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
                                                  param, NULL);
                if (ret)
                        return ret;
@@ -1939,11 +1776,13 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
 {
        int pwr_source;
 
-       pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+       pwr_source = smu_cmn_to_asic_specific_index(smu,
+                                                   CMN2ASIC_MAPPING_PWR,
+                                                   (uint32_t)power_src);
        if (pwr_source < 0)
                return -EINVAL;
 
-       return smu_send_smc_msg_with_param(smu,
+       return smu_cmn_send_smc_msg_with_param(smu,
                                        SMU_MSG_NotifyPowerSource,
                                        pwr_source,
                                        NULL);
@@ -1960,16 +1799,18 @@ int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
        if (!value)
                return -EINVAL;
 
-       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
                return 0;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_GetDpmFreqByIndex,
                                          param,
                                          value);
index 4e1b11d..3145643 100644
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define SWSMU_CODE_LAYER_L3
+
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "smu_v12_0.h"
 #include "soc15_common.h"
 #include "atom.h"
+#include "smu_cmn.h"
 
 #include "asic_reg/mp/mp_12_0_0_offset.h"
 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
 
 #define smnMP1_FIRMWARE_FLAGS                                0x3010024
 
-int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
-                                             uint16_t msg)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-       return 0;
-}
-
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-       return 0;
-}
-
-int smu_v12_0_wait_for_response(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-       uint32_t cur_value, i;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-               if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-                       return cur_value == 0x1 ? 0 : -EIO;
-
-               udelay(1);
-       }
-
-       /* timeout means wrong logic */
-       return -ETIME;
-}
-
-int
-smu_v12_0_send_msg_with_param(struct smu_context *smu,
-                             enum smu_message_type msg,
-                             uint32_t param,
-                             uint32_t *read_arg)
-{
-       struct amdgpu_device *adev = smu->adev;
-       int ret = 0, index = 0;
-
-       index = smu_msg_get_index(smu, msg);
-       if (index < 0)
-               return index;
-
-       mutex_lock(&smu->message_lock);
-       ret = smu_v12_0_wait_for_response(smu);
-       if (ret) {
-               dev_err(adev->dev, "Msg issuing pre-check failed and "
-                      "SMU may be not in the right state!\n");
-               goto out;
-       }
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
-       smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
-
-       ret = smu_v12_0_wait_for_response(smu);
-       if (ret) {
-               dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x param 0x%x\n",
-                      index, ret, param);
-               goto out;
-       }
-       if (read_arg) {
-               ret = smu_v12_0_read_arg(smu, read_arg);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
-                              index, ret, param);
-                       goto out;
-               }
-       }
-out:
-       mutex_unlock(&smu->message_lock);
-       return ret;
-}
-
 int smu_v12_0_check_fw_status(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -154,7 +76,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
        uint8_t smu_minor, smu_debug;
        int ret = 0;
 
-       ret = smu_get_smc_version(smu, &if_version, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;
 
@@ -187,9 +109,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
                return 0;
 
        if (gate)
-               return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
+               return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
        else
-               return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
+               return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
 }
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -197,7 +119,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
        if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
                return 0;
 
-       return smu_v12_0_send_msg_with_param(smu,
+       return smu_cmn_send_smc_msg_with_param(smu,
                SMU_MSG_SetGfxCGPG,
                enable ? 1 : 0,
                NULL);
@@ -233,10 +155,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
        int ret = 0, timeout = 500;
 
        if (enable) {
-               ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
 
        } else {
-               ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
 
                /* confirm gfx is back to "on" state, timeout is 0.5 second */
                while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -252,36 +174,18 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
        return ret;
 }
 
-int smu_v12_0_init_smc_tables(struct smu_context *smu)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-       struct smu_table *tables = NULL;
-
-       if (smu_table->tables)
-               return -EINVAL;
-
-       tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
-                        GFP_KERNEL);
-       if (!tables)
-               return -ENOMEM;
-
-       smu_table->tables = tables;
-
-       return smu_tables_init(smu, tables);
-}
-
 int smu_v12_0_fini_smc_tables(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
 
-       if (!smu_table->tables)
-               return -EINVAL;
-
        kfree(smu_table->clocks_table);
-       kfree(smu_table->tables);
-
        smu_table->clocks_table = NULL;
-       smu_table->tables = NULL;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->watermarks_table);
+       smu_table->watermarks_table = NULL;
 
        return 0;
 }
@@ -290,34 +194,11 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
 
-       return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v12_0_get_enabled_mask(struct smu_context *smu,
-                                     uint32_t *feature_mask, uint32_t num)
-{
-       uint32_t feature_mask_high = 0, feature_mask_low = 0;
-       int ret = 0;
-
-       if (!feature_mask || num < 2)
-               return -EINVAL;
-
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
-       if (ret)
-               return ret;
-
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
-       if (ret)
-               return ret;
-
-       feature_mask[0] = feature_mask_low;
-       feature_mask[1] = feature_mask_high;
-
-       return ret;
+       return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
 }
 
 int smu_v12_0_mode2_reset(struct smu_context *smu){
-       return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
+       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
 }
 
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -325,42 +206,45 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 {
        int ret = 0;
 
+       if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
                if (ret)
                        return ret;
 
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
                if (ret)
                        return ret;
        break;
        case SMU_FCLK:
        case SMU_MCLK:
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
                if (ret)
                        return ret;
 
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
                if (ret)
                        return ret;
        break;
        case SMU_SOCCLK:
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
                if (ret)
                        return ret;
 
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
                if (ret)
                        return ret;
        break;
        case SMU_VCLK:
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
                if (ret)
                        return ret;
 
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
                if (ret)
                        return ret;
        break;
@@ -377,12 +261,12 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
        int ret = 0;
 
        if (driver_table->mc_address) {
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrHigh,
                                upper_32_bits(driver_table->mc_address),
                                NULL);
                if (!ret)
-                       ret = smu_send_smc_msg_with_param(smu,
+                       ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrLow,
                                lower_32_bits(driver_table->mc_address),
                                NULL);
index c2e0fbb..cf43629 100644
@@ -522,11 +522,9 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
 
-       if (adev->psp.ras.ras) {
-               ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
-               if (ret)
-                       goto err4;
-       }
+       ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
+       if (ret)
+               goto err4;
 
        return 0;
 
@@ -562,8 +560,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        struct amdgpu_device *adev = hwmgr->adev;
 
-       if (adev->psp.ras.ras)
-               smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+       smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
 
        if (priv) {
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
index 3da71a0..0ecc18b 100644
@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
 
        /* sclk is bigger than max sclk in the dependence table */
        *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-       vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                       (dep_table->entries[i - 1].vddc -
-                                       (uint16_t)VDDC_VDDCI_DELTA));
 
        if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
                *voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
        else if (dep_table->entries[i - 1].vddci)
                *voltage |= (dep_table->entries[i - 1].vddci *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
-       else
+       else {
+               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                               (dep_table->entries[i - 1].vddc -
+                                               (uint16_t)VDDC_VDDCI_DELTA));
+
                *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+       }
 
        if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
                *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
index 5e7ea04..903f4f3 100644
@@ -173,8 +173,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
 
        drm_mode_config_reset(drm);
 
-       drm_fbdev_generic_setup(drm, 32);
-
        return 0;
 }
 
@@ -225,6 +223,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
        if (ret)
                goto err_unload;
 
+       drm_fbdev_generic_setup(&priv->drm, 32);
        return 0;
 
 err_unload:
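
Note: drm_fbdev_generic_setup() is moved out of the load helper so that it runs only once the DRM device has been registered in the probe path. A hedged sketch of the resulting ordering; the function name below is made up for illustration:

/*
 * Hypothetical sketch: fbdev emulation is set up only after
 * drm_dev_register() has succeeded, mirroring the change above.
 */
static int example_register_and_fbdev(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* safe only once the device is registered */
	drm_fbdev_generic_setup(drm, 32);
	return 0;
}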
index f0a66ef..25ce42e 100644
@@ -73,8 +73,11 @@ enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
                                         const struct drm_display_mode *mode);
 enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
                                            const struct drm_display_mode *mode);
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
-                                             struct drm_display_mode *mode);
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+                        struct drm_display_mode *mode,
+                        struct drm_modeset_acquire_ctx *ctx,
+                        enum drm_mode_status *status);
 
 struct drm_encoder *
 drm_connector_get_single_encoder(struct drm_connector *connector);
index da0d96a..88146f7 100644
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_debug_leave);
 
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+                                           bool force)
 {
        bool do_delayed;
        int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
                return 0;
 
        mutex_lock(&fb_helper->lock);
-       ret = drm_client_modeset_commit(&fb_helper->client);
+       if (force) {
+               /*
+                * Yes this is the _locked version which expects the master lock
+                * to be held. But for forced restores we're intentionally
+                * racing here, see drm_fb_helper_set_par().
+                */
+               ret = drm_client_modeset_commit_locked(&fb_helper->client);
+       } else {
+               ret = drm_client_modeset_commit(&fb_helper->client);
+       }
 
        do_delayed = fb_helper->delayed_hotplug;
        if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 
        return ret;
 }
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+       return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct fb_var_screeninfo *var = &info->var;
+       bool force;
 
        if (oops_in_progress)
                return -EBUSY;
@@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
                return -EINVAL;
        }
 
-       drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       /*
+        * Normally we want to make sure that a kms master takes precedence over
+        * fbdev, to avoid fbdev flickering and occasionally stealing the
+        * display status. But Xorg first sets the vt back to text mode using
+        * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+        * status when exiting.
+        *
+        * In the past this was caught by drm_fb_helper_lastclose(), but on
+        * modern systems where logind always keeps a drm fd open to orchestrate
+        * the vt switching, this doesn't work.
+        *
+        * To not break the userspace ABI we have this special case here, which
+        * is only used for the above case. Everything else uses the normal
+        * commit function, which ensures that we never steal the display from
+        * an active drm master.
+        */
+       force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+       __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
 
        return 0;
 }
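
Note: per the kernel-doc retained above, the unlocked restore helper is intended for a driver's &drm_driver.lastclose hook; only the fbdev KDSET/KD_TEXT path goes through the new forced variant. A minimal sketch of such a hook (driver name hypothetical, dev->fb_helper assumed to be set up by the fbdev emulation):

/*
 * Minimal sketch of a &drm_driver.lastclose hook using the helper above;
 * drivers can equally rely on drm_fb_helper_lastclose().
 */
static void example_lastclose(struct drm_device *dev)
{
	if (dev->fb_helper)
		drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
}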
index ffd95bf..d00ea38 100644
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
        int orientation;
 };
 
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
-       .width = 800,
-       .height = 1280,
-       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
 static const struct drm_dmi_panel_orientation_data asus_t100ha = {
        .width = 800,
        .height = 1280,
@@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
                },
-               .driver_data = (void *)&acer_s1003,
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Asus T100HA */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
                },
                .driver_data = (void *)&asus_t100ha,
+       }, {    /* Asus T101HA */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* GPD MicroPC (generic strings, also match on bios date) */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
index e0ed58d..d601772 100644
@@ -86,17 +86,19 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
        return MODE_OK;
 }
 
-static enum drm_mode_status
+static int
 drm_mode_validate_pipeline(struct drm_display_mode *mode,
-                           struct drm_connector *connector)
+                          struct drm_connector *connector,
+                          struct drm_modeset_acquire_ctx *ctx,
+                          enum drm_mode_status *status)
 {
        struct drm_device *dev = connector->dev;
-       enum drm_mode_status ret = MODE_OK;
        struct drm_encoder *encoder;
+       int ret;
 
        /* Step 1: Validate against connector */
-       ret = drm_connector_mode_valid(connector, mode);
-       if (ret != MODE_OK)
+       ret = drm_connector_mode_valid(connector, mode, ctx, status);
+       if (ret || *status != MODE_OK)
                return ret;
 
        /* Step 2: Validate against encoders and crtcs */
@@ -104,8 +106,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                struct drm_bridge *bridge;
                struct drm_crtc *crtc;
 
-               ret = drm_encoder_mode_valid(encoder, mode);
-               if (ret != MODE_OK) {
+               *status = drm_encoder_mode_valid(encoder, mode);
+               if (*status != MODE_OK) {
                        /* No point in continuing for crtc check as this encoder
                         * will not accept the mode anyway. If all encoders
                         * reject the mode then, at exit, ret will not be
@@ -114,10 +116,10 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                }
 
                bridge = drm_bridge_chain_get_first_bridge(encoder);
-               ret = drm_bridge_chain_mode_valid(bridge,
-                                                 &connector->display_info,
-                                                 mode);
-               if (ret != MODE_OK) {
+               *status = drm_bridge_chain_mode_valid(bridge,
+                                                     &connector->display_info,
+                                                     mode);
+               if (*status != MODE_OK) {
                        /* There is also no point in continuing for crtc check
                         * here. */
                        continue;
@@ -127,17 +129,17 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                        if (!drm_encoder_crtc_ok(encoder, crtc))
                                continue;
 
-                       ret = drm_crtc_mode_valid(crtc, mode);
-                       if (ret == MODE_OK) {
+                       *status = drm_crtc_mode_valid(crtc, mode);
+                       if (*status == MODE_OK) {
                                /* If we get to this point there is at least
                                 * one combination of encoder+crtc that works
                                 * for this mode. Lets return now. */
-                               return ret;
+                               return 0;
                        }
                }
        }
 
-       return ret;
+       return 0;
 }
 
 static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@@ -198,16 +200,27 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
        return encoder_funcs->mode_valid(encoder, mode);
 }
 
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
-                                             struct drm_display_mode *mode)
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+                        struct drm_display_mode *mode,
+                        struct drm_modeset_acquire_ctx *ctx,
+                        enum drm_mode_status *status)
 {
        const struct drm_connector_helper_funcs *connector_funcs =
                connector->helper_private;
+       int ret = 0;
+
+       if (!connector_funcs)
+               *status = MODE_OK;
+       else if (connector_funcs->mode_valid_ctx)
+               ret = connector_funcs->mode_valid_ctx(connector, mode, ctx,
+                                                     status);
+       else if (connector_funcs->mode_valid)
+               *status = connector_funcs->mode_valid(connector, mode);
+       else
+               *status = MODE_OK;
 
-       if (!connector_funcs || !connector_funcs->mode_valid)
-               return MODE_OK;
-
-       return connector_funcs->mode_valid(connector, mode);
+       return ret;
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
@@ -385,8 +398,9 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
  *      (if specified)
  *    - drm_mode_validate_flag() checks the modes against basic connector
  *      capabilities (interlace_allowed,doublescan_allowed,stereo_allowed)
- *    - the optional &drm_connector_helper_funcs.mode_valid helper can perform
- *      driver and/or sink specific checks
+ *    - the optional &drm_connector_helper_funcs.mode_valid or
+ *      &drm_connector_helper_funcs.mode_valid_ctx helpers can perform driver
+ *      and/or sink specific checks
  *    - the optional &drm_crtc_helper_funcs.mode_valid,
  *      &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid
  *      helpers can perform driver and/or source specific checks which are also
@@ -517,22 +531,39 @@ retry:
                mode_flags |= DRM_MODE_FLAG_3D_MASK;
 
        list_for_each_entry(mode, &connector->modes, head) {
-               if (mode->status == MODE_OK)
-                       mode->status = drm_mode_validate_driver(dev, mode);
+               if (mode->status != MODE_OK)
+                       continue;
+
+               mode->status = drm_mode_validate_driver(dev, mode);
+               if (mode->status != MODE_OK)
+                       continue;
 
-               if (mode->status == MODE_OK)
-                       mode->status = drm_mode_validate_size(mode, maxX, maxY);
+               mode->status = drm_mode_validate_size(mode, maxX, maxY);
+               if (mode->status != MODE_OK)
+                       continue;
 
-               if (mode->status == MODE_OK)
-                       mode->status = drm_mode_validate_flag(mode, mode_flags);
+               mode->status = drm_mode_validate_flag(mode, mode_flags);
+               if (mode->status != MODE_OK)
+                       continue;
 
-               if (mode->status == MODE_OK)
-                       mode->status = drm_mode_validate_pipeline(mode,
-                                                                 connector);
+               ret = drm_mode_validate_pipeline(mode, connector, &ctx,
+                                                &mode->status);
+               if (ret) {
+                       drm_dbg_kms(dev,
+                                   "drm_mode_validate_pipeline failed: %d\n",
+                                   ret);
+
+                       if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK)) {
+                               mode->status = MODE_ERROR;
+                       } else {
+                               drm_modeset_backoff(&ctx);
+                               goto retry;
+                       }
+               }
 
-               if (mode->status == MODE_OK)
-                       mode->status = drm_mode_validate_ycbcr420(mode,
-                                                                 connector);
+               if (mode->status != MODE_OK)
+                       continue;
+               mode->status = drm_mode_validate_ycbcr420(mode, connector);
        }
 
 prune:
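The retry label and drm_modeset_backoff() call above follow the standard DRM acquire-context pattern; as a reminder, the generic boilerplate looks roughly like this (some_lock stands for any ctx-aware modeset lock):

            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(&some_lock, &ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(&ctx);      /* drop held locks, wait, retry */
                    goto retry;
            }
            /* ... work while the locks are held ... */
            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);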
index 619f814..58b89ec 100644 (file)
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       int ret;
+       int ret = 0;
 
        if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
                DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
        if (ret)
                clear_dma_max_seg_size(subdrv_dev);
 
-       return 0;
+       return ret;
 }
 
 /*
index fcee33a..03be314 100644 (file)
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
 
        g2d->irq = platform_get_irq(pdev, 0);
        if (g2d->irq < 0) {
-               dev_err(dev, "failed to get irq\n");
                ret = g2d->irq;
                goto err_put_clk;
        }
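The removed dev_err() was redundant: platform_get_irq() already logs an error itself when it fails (except for probe deferral), so callers only need to propagate the code, roughly:

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* platform_get_irq() has already complained */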
index a86abc1..3821ea7 100644 (file)
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
                goto unlock;
 
        ret = pm_runtime_get_sync(mic->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(mic->dev);
                goto unlock;
+       }
 
        mic_set_path(mic, 1);
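The added pm_runtime_put_noidle() balances the reference that pm_runtime_get_sync() takes even when it fails; the usual shape of that error path (dev being whatever device was resumed) is:

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* get_sync() bumped the usage count despite failing */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }
            /* ... access the hardware ... */
            pm_runtime_put(dev);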
 
index 249c298..eea13e6 100644 (file)
@@ -308,8 +308,6 @@ static int hibmc_load(struct drm_device *dev)
        /* reset all the states of crtc/plane/encoder/connector */
        drm_mode_config_reset(dev);
 
-       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
        return 0;
 
 err:
@@ -356,6 +354,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
                          ret);
                goto err_unload;
        }
+
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
        return 0;
 
 err_unload:
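Taken together, the two hibmc hunks move the generic fbdev setup out of hibmc_load() and into the probe path after the device has been registered; the resulting ordering is roughly the following sketch (error labels are schematic, not the driver's exact ones):

            ret = hibmc_load(dev);          /* modeset init, no fbdev setup here anymore */
            if (ret)
                    goto err;
            ret = drm_dev_register(dev, 0);
            if (ret)
                    goto err_unload;
            drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
            return 0;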
index 41a27fd..bda4c0e 100644 (file)
@@ -112,6 +112,7 @@ gt-y += \
        gt/intel_ring_submission.o \
        gt/intel_rps.o \
        gt/intel_sseu.o \
+       gt/intel_sseu_debugfs.o \
        gt/intel_timeline.o \
        gt/intel_workarounds.o \
        gt/shmem_utils.o \
index 6593e2c..c53c85d 100644 (file)
@@ -722,6 +722,9 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
         */
        if (!(power->drrs & BIT(panel_type)))
                dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+       if (bdb->version >= 232)
+               dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type);
 }
 
 static void
index 45f7f33..bb91dac 100644 (file)
@@ -2080,8 +2080,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
         * Explicitly stating here that this seems to be currently
         * rather a Hack, than final solution.
         */
-       if (IS_TIGERLAKE(dev_priv))
-               min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+       if (IS_TIGERLAKE(dev_priv)) {
+               /*
+                * Clamp to max_cdclk_freq in case the pixel rate is higher,
+                * so we do not break 8K modes while still leaving the W/A in place.
+                */
+               min_cdclk = max_t(int, min_cdclk,
+                                 min_t(int, crtc_state->pixel_rate,
+                                       dev_priv->max_cdclk_freq));
+       }
 
        if (min_cdclk > dev_priv->max_cdclk_freq) {
                drm_dbg_kms(&dev_priv->drm,
index 77b04bb..eccaa79 100644 (file)
@@ -264,6 +264,18 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
        if (!icl_combo_phy_enabled(dev_priv, phy))
                return false;
 
+       if (INTEL_GEN(dev_priv) >= 12) {
+               ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+                                    ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+                                    ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
+                                    ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+                                    ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
+
+               ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+                                    DCC_MODE_SELECT_MASK,
+                                    DCC_MODE_SELECT_CONTINUOSLY);
+       }
+
        ret = cnl_verify_procmon_ref_values(dev_priv, phy);
 
        if (phy_is_master(dev_priv, phy)) {
@@ -375,6 +387,19 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
                intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
 
 skip_phy_misc:
+               if (INTEL_GEN(dev_priv) >= 12) {
+                       val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+                       val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
+                       val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
+                       val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
+                       intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+
+                       val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+                       val &= ~DCC_MODE_SELECT_MASK;
+                       val |= DCC_MODE_SELECT_CONTINUOSLY;
+                       intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+               }
+
                cnl_set_procmon_ref_values(dev_priv, phy);
 
                if (phy_is_master(dev_priv, phy)) {
index 025d405..2c484b5 100644 (file)
@@ -707,8 +707,10 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
 };
 
 static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (dev_priv->vbt.edp.low_vswing) {
                *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
                return bdw_ddi_translations_edp;
@@ -719,8 +721,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (IS_SKL_ULX(dev_priv)) {
                *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
                return skl_y_ddi_translations_dp;
@@ -734,8 +738,10 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (IS_KBL_ULX(dev_priv) ||
            IS_CFL_ULX(dev_priv) ||
            IS_CML_ULX(dev_priv)) {
@@ -753,8 +759,10 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (dev_priv->vbt.edp.low_vswing) {
                if (IS_SKL_ULX(dev_priv) ||
                    IS_KBL_ULX(dev_priv) ||
@@ -777,9 +785,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
        if (IS_KABYLAKE(dev_priv) ||
            IS_COFFEELAKE(dev_priv) ||
            IS_COMETLAKE(dev_priv))
-               return kbl_get_buf_trans_dp(dev_priv, n_entries);
+               return kbl_get_buf_trans_dp(encoder, n_entries);
        else
-               return skl_get_buf_trans_dp(dev_priv, n_entries);
+               return skl_get_buf_trans_dp(encoder, n_entries);
 }
 
 static const struct ddi_buf_trans *
@@ -807,20 +815,21 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
-                          enum port port, int *n_entries)
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (IS_KABYLAKE(dev_priv) ||
            IS_COFFEELAKE(dev_priv) ||
            IS_COMETLAKE(dev_priv)) {
                const struct ddi_buf_trans *ddi_translations =
-                       kbl_get_buf_trans_dp(dev_priv, n_entries);
-               *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                       kbl_get_buf_trans_dp(encoder, n_entries);
+               *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                return ddi_translations;
        } else if (IS_SKYLAKE(dev_priv)) {
                const struct ddi_buf_trans *ddi_translations =
-                       skl_get_buf_trans_dp(dev_priv, n_entries);
-               *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                       skl_get_buf_trans_dp(encoder, n_entries);
+               *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                return ddi_translations;
        } else if (IS_BROADWELL(dev_priv)) {
                *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
@@ -835,16 +844,17 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
-                           enum port port, int *n_entries)
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (IS_GEN9_BC(dev_priv)) {
                const struct ddi_buf_trans *ddi_translations =
-                       skl_get_buf_trans_edp(dev_priv, n_entries);
-               *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+                       skl_get_buf_trans_edp(encoder, n_entries);
+               *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
                return ddi_translations;
        } else if (IS_BROADWELL(dev_priv)) {
-               return bdw_get_buf_trans_edp(dev_priv, n_entries);
+               return bdw_get_buf_trans_edp(encoder, n_entries);
        } else if (IS_HASWELL(dev_priv)) {
                *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
                return hsw_ddi_translations_dp;
@@ -871,9 +881,11 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
 }
 
 static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
                             int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (IS_GEN9_BC(dev_priv)) {
                return skl_get_buf_trans_hdmi(dev_priv, n_entries);
        } else if (IS_BROADWELL(dev_priv)) {
@@ -889,33 +901,36 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
        *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
        return bxt_ddi_translations_dp;
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (dev_priv->vbt.edp.low_vswing) {
                *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
                return bxt_ddi_translations_edp;
        }
 
-       return bxt_get_buf_trans_dp(dev_priv, n_entries);
+       return bxt_get_buf_trans_dp(encoder, n_entries);
 }
 
 static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
 {
        *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
        return bxt_ddi_translations_hdmi;
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
        if (voltage == VOLTAGE_INFO_0_85V) {
@@ -935,8 +950,9 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
        if (voltage == VOLTAGE_INFO_0_85V) {
@@ -956,8 +972,9 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
        if (dev_priv->vbt.edp.low_vswing) {
@@ -976,14 +993,16 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
                }
                return NULL;
        } else {
-               return cnl_get_buf_trans_dp(dev_priv, n_entries);
+               return cnl_get_buf_trans_dp(encoder, n_entries);
        }
 }
 
 static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                        int *n_entries)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
        if (type == INTEL_OUTPUT_HDMI) {
                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
                return icl_combo_phy_ddi_translations_hdmi;
@@ -1000,7 +1019,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate,
                     int *n_entries)
 {
        if (type == INTEL_OUTPUT_HDMI) {
@@ -1016,7 +1035,7 @@ icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                        int *n_entries)
 {
        if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
@@ -1024,15 +1043,15 @@ ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
                return ehl_combo_phy_ddi_translations_dp;
        }
 
-       return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+       return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
 }
 
 static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
                        int *n_entries)
 {
        if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
-               return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+               return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
        } else if (rate > 270000) {
                *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
                return tgl_combo_phy_ddi_translations_dp_hbr2;
@@ -1043,7 +1062,7 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 }
 
 static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate,
                      int *n_entries)
 {
        if (type == INTEL_OUTPUT_HDMI) {
@@ -1066,34 +1085,34 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
 
        if (INTEL_GEN(dev_priv) >= 12) {
                if (intel_phy_is_combo(dev_priv, phy))
-                       tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+                       tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
                                                0, &n_entries);
                else
-                       tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+                       tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
                                              &n_entries);
                default_entry = n_entries - 1;
        } else if (INTEL_GEN(dev_priv) == 11) {
                if (intel_phy_is_combo(dev_priv, phy))
-                       icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+                       icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
                                                0, &n_entries);
                else
-                       icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+                       icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
                                             &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_CANNONLAKE(dev_priv)) {
-               cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+               cnl_get_buf_trans_hdmi(encoder, &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_GEN9_LP(dev_priv)) {
-               bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+               bxt_get_buf_trans_hdmi(encoder, &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_GEN9_BC(dev_priv)) {
-               intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+               intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                default_entry = 8;
        } else if (IS_BROADWELL(dev_priv)) {
-               intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+               intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                default_entry = 7;
        } else if (IS_HASWELL(dev_priv)) {
-               intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+               intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                default_entry = 6;
        } else {
                drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
@@ -1131,10 +1150,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
                ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
                                                               &n_entries);
        else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-               ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
+               ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
                                                               &n_entries);
        else
-               ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
+               ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
                                                              &n_entries);
 
        /* If we're boosting the current, set bit 31 of trans1 */
@@ -1163,7 +1182,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
        enum port port = encoder->port;
        const struct ddi_buf_trans *ddi_translations;
 
-       ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+       ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
 
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
@@ -1184,16 +1203,30 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
                                    enum port port)
 {
-       i915_reg_t reg = DDI_BUF_CTL(port);
-       int i;
+       if (IS_BROXTON(dev_priv)) {
+               udelay(16);
+               return;
+       }
 
-       for (i = 0; i < 16; i++) {
-               udelay(1);
-               if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
-                       return;
+       if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                        DDI_BUF_IS_IDLE), 8))
+               drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+                       port_name(port));
+}
+
+static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
+                                     enum port port)
+{
+       /* Wait > 518 usecs for DDI_BUF_CTL to become non-idle */
+       if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+               usleep_range(518, 1000);
+               return;
        }
-       drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
-               port_name(port));
+
+       if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                         DDI_BUF_IS_IDLE), 500))
+               drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+                       port_name(port));
 }
 
 static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
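The reworked helpers above poll DDI_BUF_CTL with the driver's wait_for_us() macro and explicit timeouts instead of fixed or open-coded delay loops. Purely as an illustration (this is not i915's actual implementation), a bounded poll of that kind boils down to:

    static int poll_until_us(bool (*cond)(void *data), void *data, unsigned int timeout_us)
    {
            const ktime_t end = ktime_add_us(ktime_get_raw(), timeout_us);

            for (;;) {
                    if (cond(data))
                            return 0;
                    if (ktime_after(ktime_get_raw(), end))
                            return cond(data) ? 0 : -ETIMEDOUT;
                    cpu_relax();
            }
    }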
@@ -1394,10 +1427,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct intel_digital_port *intel_dig_port =
-               enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
-       intel_dp->DP = intel_dig_port->saved_port_bits |
+       intel_dp->DP = dig_port->saved_port_bits |
                DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
        intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 }
@@ -2070,9 +2102,8 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                               int level, enum intel_output_type type)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
        u8 iboost;
 
        if (type == INTEL_OUTPUT_HDMI)
@@ -2085,11 +2116,13 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                int n_entries;
 
                if (type == INTEL_OUTPUT_HDMI)
-                       ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+                       ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
                else if (type == INTEL_OUTPUT_EDP)
-                       ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+                       ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
+                                                                      &n_entries);
                else
-                       ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+                       ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
+                                                                     &n_entries);
 
                if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                        return;
@@ -2105,9 +2138,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
                return;
        }
 
-       _skl_ddi_set_iboost(dev_priv, port, iboost);
+       _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
 
-       if (port == PORT_A && intel_dig_port->max_lanes == 4)
+       if (encoder->port == PORT_A && dig_port->max_lanes == 4)
                _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
@@ -2120,11 +2153,11 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
        int n_entries;
 
        if (type == INTEL_OUTPUT_HDMI)
-               ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+               ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
        else if (type == INTEL_OUTPUT_EDP)
-               ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+               ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
        else
-               ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+               ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
 
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
@@ -2148,36 +2181,36 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
 
        if (INTEL_GEN(dev_priv) >= 12) {
                if (intel_phy_is_combo(dev_priv, phy))
-                       tgl_get_combo_buf_trans(dev_priv, encoder->type,
+                       tgl_get_combo_buf_trans(encoder, encoder->type,
                                                intel_dp->link_rate, &n_entries);
                else
-                       tgl_get_dkl_buf_trans(dev_priv, encoder->type,
+                       tgl_get_dkl_buf_trans(encoder, encoder->type,
                                              intel_dp->link_rate, &n_entries);
        } else if (INTEL_GEN(dev_priv) == 11) {
                if (IS_ELKHARTLAKE(dev_priv))
-                       ehl_get_combo_buf_trans(dev_priv, encoder->type,
+                       ehl_get_combo_buf_trans(encoder, encoder->type,
                                                intel_dp->link_rate, &n_entries);
                else if (intel_phy_is_combo(dev_priv, phy))
-                       icl_get_combo_buf_trans(dev_priv, encoder->type,
+                       icl_get_combo_buf_trans(encoder, encoder->type,
                                                intel_dp->link_rate, &n_entries);
                else
-                       icl_get_mg_buf_trans(dev_priv, encoder->type,
+                       icl_get_mg_buf_trans(encoder, encoder->type,
                                             intel_dp->link_rate, &n_entries);
        } else if (IS_CANNONLAKE(dev_priv)) {
                if (encoder->type == INTEL_OUTPUT_EDP)
-                       cnl_get_buf_trans_edp(dev_priv, &n_entries);
+                       cnl_get_buf_trans_edp(encoder, &n_entries);
                else
-                       cnl_get_buf_trans_dp(dev_priv, &n_entries);
+                       cnl_get_buf_trans_dp(encoder, &n_entries);
        } else if (IS_GEN9_LP(dev_priv)) {
                if (encoder->type == INTEL_OUTPUT_EDP)
-                       bxt_get_buf_trans_edp(dev_priv, &n_entries);
+                       bxt_get_buf_trans_edp(encoder, &n_entries);
                else
-                       bxt_get_buf_trans_dp(dev_priv, &n_entries);
+                       bxt_get_buf_trans_dp(encoder, &n_entries);
        } else {
                if (encoder->type == INTEL_OUTPUT_EDP)
-                       intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+                       intel_ddi_get_buf_trans_edp(encoder, &n_entries);
                else
-                       intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+                       intel_ddi_get_buf_trans_dp(encoder, &n_entries);
        }
 
        if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
@@ -2210,11 +2243,11 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
        u32 val;
 
        if (type == INTEL_OUTPUT_HDMI)
-               ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+               ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
        else if (type == INTEL_OUTPUT_EDP)
-               ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+               ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
        else
-               ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
+               ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
 
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
@@ -2331,22 +2364,23 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
        intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 }
 
-static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-                                       u32 level, enum phy phy, int type,
-                                       int rate)
+static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
+                                        u32 level, int type, int rate)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
        const struct cnl_ddi_buf_trans *ddi_translations = NULL;
        u32 n_entries, val;
        int ln;
 
        if (INTEL_GEN(dev_priv) >= 12)
-               ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+               ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate,
                                                           &n_entries);
        else if (IS_ELKHARTLAKE(dev_priv))
-               ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+               ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate,
                                                           &n_entries);
        else
-               ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+               ddi_translations = icl_get_combo_buf_trans(encoder, type, rate,
                                                           &n_entries);
        if (!ddi_translations)
                return;
@@ -2458,7 +2492,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
 
        /* 5. Program swing and de-emphasis */
-       icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
+       icl_ddi_combo_vswing_program(encoder, level, type, rate);
 
        /* 6. Set training enable to trigger update */
        val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
@@ -2482,7 +2516,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
                rate = intel_dp->link_rate;
        }
 
-       ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+       ddi_translations = icl_get_mg_buf_trans(encoder, type, rate,
                                                &n_entries);
        /* The table does not have values for level 3 and level 9. */
        if (level >= n_entries || level == 3 || level == 9) {
@@ -2627,7 +2661,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
                rate = intel_dp->link_rate;
        }
 
-       ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate,
+       ddi_translations = tgl_get_dkl_buf_trans(encoder, encoder->type, rate,
                                                 &n_entries);
 
        if (level >= n_entries)
@@ -3000,15 +3034,15 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 }
 
 static void
-icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
                       const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-       enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
        u32 ln0, ln1, pin_assignment;
        u8 width;
 
-       if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+       if (dig_port->tc_mode == TC_PORT_TBT_ALT)
                return;
 
        if (INTEL_GEN(dev_priv) >= 12) {
@@ -3027,13 +3061,13 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
        ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 
        /* DPPATC */
-       pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+       pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
        width = crtc_state->lane_count;
 
        switch (pin_assignment) {
        case 0x0:
                drm_WARN_ON(&dev_priv->drm,
-                           intel_dig_port->tc_mode != TC_PORT_LEGACY);
+                           dig_port->tc_mode != TC_PORT_LEGACY);
                if (width == 1) {
                        ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
                } else {
@@ -3978,10 +4012,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
 
 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv =
-               to_i915(intel_dig_port->base.base.dev);
-       enum port port = intel_dig_port->base.port;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
        u32 dp_tp_ctl, ddi_buf_ctl;
        bool wait = false;
 
@@ -4020,7 +4053,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
        intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
        intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 
-       udelay(600);
+       intel_wait_ddi_buf_active(dev_priv, port);
 }
 
 static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -4536,42 +4569,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 };
 
 static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        struct intel_connector *connector;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
 
        connector = intel_connector_alloc();
        if (!connector)
                return NULL;
 
-       intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
-       intel_dig_port->dp.prepare_link_retrain =
-               intel_ddi_prepare_link_retrain;
-       intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
-       intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+       dig_port->dp.output_reg = DDI_BUF_CTL(port);
+       dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+       dig_port->dp.set_link_train = intel_ddi_set_link_train;
+       dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
 
        if (INTEL_GEN(dev_priv) >= 12)
-               intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+               dig_port->dp.set_signal_levels = tgl_set_signal_levels;
        else if (INTEL_GEN(dev_priv) >= 11)
-               intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+               dig_port->dp.set_signal_levels = icl_set_signal_levels;
        else if (IS_CANNONLAKE(dev_priv))
-               intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+               dig_port->dp.set_signal_levels = cnl_set_signal_levels;
        else if (IS_GEN9_LP(dev_priv))
-               intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+               dig_port->dp.set_signal_levels = bxt_set_signal_levels;
        else
-               intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+               dig_port->dp.set_signal_levels = hsw_set_signal_levels;
 
-       intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
-       intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
+       dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+       dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
 
        if (INTEL_GEN(dev_priv) < 12) {
-               intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
-               intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+               dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+               dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
        }
 
-       if (!intel_dp_init_connector(intel_dig_port, connector)) {
+       if (!intel_dp_init_connector(dig_port, connector)) {
                kfree(connector);
                return NULL;
        }
@@ -4770,29 +4802,29 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
 }
 
 static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
 {
        struct intel_connector *connector;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
 
        connector = intel_connector_alloc();
        if (!connector)
                return NULL;
 
-       intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
-       intel_hdmi_init_connector(intel_dig_port, connector);
+       dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+       intel_hdmi_init_connector(dig_port, connector);
 
        return connector;
 }
 
-static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 
-       if (dport->base.port != PORT_A)
+       if (dig_port->base.port != PORT_A)
                return false;
 
-       if (dport->saved_port_bits & DDI_A_4_LANES)
+       if (dig_port->saved_port_bits & DDI_A_4_LANES)
                return false;
 
        /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4814,10 +4846,10 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
 }
 
 static int
-intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+intel_ddi_max_lanes(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
-       enum port port = intel_dport->base.port;
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum port port = dig_port->base.port;
        int max_lanes = 4;
 
        if (INTEL_GEN(dev_priv) >= 11)
@@ -4836,10 +4868,10 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
         * wasn't lit up at boot.  Force this bit set when needed
         * so we use the proper lane count for our calculations.
         */
-       if (intel_ddi_a_force_4_lanes(intel_dport)) {
+       if (intel_ddi_a_force_4_lanes(dig_port)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Forcing DDI_A_4_LANES for port A\n");
-               intel_dport->saved_port_bits |= DDI_A_4_LANES;
+               dig_port->saved_port_bits |= DDI_A_4_LANES;
                max_lanes = 4;
        }
 
@@ -4848,7 +4880,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
 
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
-       struct intel_digital_port *intel_dig_port;
+       struct intel_digital_port *dig_port;
        struct intel_encoder *encoder;
        bool init_hdmi, init_dp, init_lspcon = false;
        enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4877,11 +4909,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                return;
        }
 
-       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
-       if (!intel_dig_port)
+       dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+       if (!dig_port)
                return;
 
-       encoder = &intel_dig_port->base;
+       encoder = &dig_port->base;
 
        drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
                         DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
@@ -4908,49 +4940,49 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        encoder->pipe_mask = ~0;
 
        if (INTEL_GEN(dev_priv) >= 11)
-               intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
-                                                               DDI_BUF_CTL(port)) &
-                       DDI_BUF_PORT_REVERSAL;
+               dig_port->saved_port_bits =
+                       intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                       DDI_BUF_PORT_REVERSAL;
        else
-               intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
-                                                               DDI_BUF_CTL(port)) &
-                       (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+               dig_port->saved_port_bits =
+                       intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                       (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 
-       intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
-       intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
-       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+       dig_port->dp.output_reg = INVALID_MMIO_REG;
+       dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
+       dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
        if (intel_phy_is_tc(dev_priv, phy)) {
                bool is_legacy =
                        !intel_bios_port_supports_typec_usb(dev_priv, port) &&
                        !intel_bios_port_supports_tbt(dev_priv, port);
 
-               intel_tc_port_init(intel_dig_port, is_legacy);
+               intel_tc_port_init(dig_port, is_legacy);
 
                encoder->update_prepare = intel_ddi_update_prepare;
                encoder->update_complete = intel_ddi_update_complete;
        }
 
        drm_WARN_ON(&dev_priv->drm, port > PORT_I);
-       intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+       dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
                                              port - PORT_A;
 
        if (init_dp) {
-               if (!intel_ddi_init_dp_connector(intel_dig_port))
+               if (!intel_ddi_init_dp_connector(dig_port))
                        goto err;
 
-               intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+               dig_port->hpd_pulse = intel_dp_hpd_pulse;
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
         * case we have some really bad VBTs... */
        if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
-               if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+               if (!intel_ddi_init_hdmi_connector(dig_port))
                        goto err;
        }
 
        if (init_lspcon) {
-               if (lspcon_init(intel_dig_port))
+               if (lspcon_init(dig_port))
                        /* TODO: handle hdmi info frame part */
                        drm_dbg_kms(&dev_priv->drm,
                                    "LSPCON init success on port %c\n",
@@ -4967,26 +4999,26 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
        if (INTEL_GEN(dev_priv) >= 11) {
                if (intel_phy_is_tc(dev_priv, phy))
-                       intel_dig_port->connected = intel_tc_port_connected;
+                       dig_port->connected = intel_tc_port_connected;
                else
-                       intel_dig_port->connected = lpt_digital_port_connected;
+                       dig_port->connected = lpt_digital_port_connected;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                if (port == PORT_A || IS_GEN9_LP(dev_priv))
-                       intel_dig_port->connected = bdw_digital_port_connected;
+                       dig_port->connected = bdw_digital_port_connected;
                else
-                       intel_dig_port->connected = lpt_digital_port_connected;
+                       dig_port->connected = lpt_digital_port_connected;
        } else {
                if (port == PORT_A)
-                       intel_dig_port->connected = hsw_digital_port_connected;
+                       dig_port->connected = hsw_digital_port_connected;
                else
-                       intel_dig_port->connected = lpt_digital_port_connected;
+                       dig_port->connected = lpt_digital_port_connected;
        }
 
-       intel_infoframe_init(intel_dig_port);
+       intel_infoframe_init(dig_port);
 
        return;
 
 err:
        drm_encoder_cleanup(&encoder->base);
-       kfree(intel_dig_port);
+       kfree(dig_port);
 }
index 84e2a17..729ec6e 100644 (file)
@@ -1612,13 +1612,13 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 }
 
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                        struct intel_digital_port *dport,
+                        struct intel_digital_port *dig_port,
                         unsigned int expected_mask)
 {
        u32 port_mask;
        i915_reg_t dpll_reg;
 
-       switch (dport->base.port) {
+       switch (dig_port->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
@@ -1640,7 +1640,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                                       port_mask, expected_mask, 1000))
                drm_WARN(&dev_priv->drm, 1,
                         "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
-                        dport->base.base.base.id, dport->base.base.name,
+                        dig_port->base.base.base.id, dig_port->base.base.name,
                         intel_de_read(dev_priv, dpll_reg) & port_mask,
                         expected_mask);
 }
@@ -10073,7 +10073,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
        drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
                    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 
-       if (crtc_state->limited_color_range)
+       if (crtc_state->limited_color_range &&
+           !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                val |= PIPECONF_COLOR_RANGE_SELECT;
 
        if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -16332,7 +16333,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
-       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+       if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+           INTEL_NUM_PIPES(dev_priv) == 2)
                plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
                plane->i9xx_plane = (enum i9xx_plane_id) pipe;
index f68007f..e890c8f 100644 (file)
@@ -542,7 +542,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
 
 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                        struct intel_digital_port *dport,
+                        struct intel_digital_port *dig_port,
                         unsigned int expected_mask);
 int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
index d1cb48b..3644752 100644 (file)
@@ -1194,7 +1194,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *intel_encoder;
-       struct intel_digital_port *intel_dig_port;
+       struct intel_digital_port *dig_port;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
 
@@ -1207,14 +1207,14 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
                if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;
 
-               intel_dig_port = enc_to_dig_port(intel_encoder);
-               if (!intel_dig_port->dp.can_mst)
+               dig_port = enc_to_dig_port(intel_encoder);
+               if (!dig_port->dp.can_mst)
                        continue;
 
                seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
-                          intel_dig_port->base.base.base.id,
-                          intel_dig_port->base.base.name);
-               drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+                          dig_port->base.base.base.id,
+                          dig_port->base.base.name);
+               drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
        }
        drm_connector_list_iter_end(&conn_iter);
 
index 8a277df..0c713e8 100644 (file)
@@ -1817,8 +1817,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+       enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+       enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
 
        mutex_lock(&power_domains->lock);
 
index 4b0aaa3..e8f8091 100644 (file)
@@ -279,10 +279,10 @@ enum check_link_response {
  */
 struct intel_hdcp_shim {
        /* Outputs the transmitter's An and Aksv values to the receiver. */
-       int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+       int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
 
        /* Reads the receiver's key selection vector */
-       int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+       int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
 
        /*
         * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
@@ -290,52 +290,52 @@ struct intel_hdcp_shim {
         * different. Call it BSTATUS since that's the name the HDMI spec
         * uses and it was there first.
         */
-       int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+       int (*read_bstatus)(struct intel_digital_port *dig_port,
                            u8 *bstatus);
 
        /* Determines whether a repeater is present downstream */
-       int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+       int (*repeater_present)(struct intel_digital_port *dig_port,
                                bool *repeater_present);
 
        /* Reads the receiver's Ri' value */
-       int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+       int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
 
        /* Determines if the receiver's KSV FIFO is ready for consumption */
-       int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+       int (*read_ksv_ready)(struct intel_digital_port *dig_port,
                              bool *ksv_ready);
 
        /* Reads the ksv fifo for num_downstream devices */
-       int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+       int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
                             int num_downstream, u8 *ksv_fifo);
 
        /* Reads a 32-bit part of V' from the receiver */
-       int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+       int (*read_v_prime_part)(struct intel_digital_port *dig_port,
                                 int i, u32 *part);
 
        /* Enables HDCP signalling on the port */
-       int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+       int (*toggle_signalling)(struct intel_digital_port *dig_port,
                                 bool enable);
 
        /* Ensures the link is still protected */
-       bool (*check_link)(struct intel_digital_port *intel_dig_port);
+       bool (*check_link)(struct intel_digital_port *dig_port);
 
        /* Detects panel's hdcp capability. This is optional for HDMI. */
-       int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+       int (*hdcp_capable)(struct intel_digital_port *dig_port,
                            bool *hdcp_capable);
 
        /* HDCP adaptation(DP/HDMI) required on the port */
        enum hdcp_wired_protocol protocol;
 
        /* Detects whether sink is HDCP2.2 capable */
-       int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+       int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
                                bool *capable);
 
        /* Write HDCP2.2 messages */
-       int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+       int (*write_2_2_msg)(struct intel_digital_port *dig_port,
                             void *buf, size_t size);
 
        /* Read HDCP2.2 messages */
-       int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+       int (*read_2_2_msg)(struct intel_digital_port *dig_port,
                            u8 msg_id, void *buf, size_t size);
 
        /*
@@ -343,11 +343,11 @@ struct intel_hdcp_shim {
         * type to Receivers. In DP HDCP2.2 Stream type is one of the input to
         * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
         */
-       int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+       int (*config_stream_type)(struct intel_digital_port *dig_port,
                                  bool is_repeater, u8 type);
 
        /* HDCP2.2 Link Integrity Check */
-       int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
+       int (*check_2_2_link)(struct intel_digital_port *dig_port);
 };
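The shim above is a per-protocol vtable: every hook receives the digital port, so the rename from intel_dig_port to dig_port is purely mechanical and touches each prototype once. A minimal sketch of how a caller might drive one of these hooks through the shorter name (the wrapper function is illustrative only, not an actual i915 call site):

    /* Illustrative caller; only the shim fields come from the struct above. */
    static int example_query_repeater(struct intel_digital_port *dig_port,
                                      const struct intel_hdcp_shim *shim)
    {
            bool present;
            int ret;

            ret = shim->repeater_present(dig_port, &present);
            if (ret)
                    return ret;

            return present ? 1 : 0;
    }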
 
 struct intel_hdcp {
@@ -1434,9 +1434,9 @@ struct intel_dp_mst_encoder {
 };
 
 static inline enum dpio_channel
-vlv_dport_to_channel(struct intel_digital_port *dport)
+vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
 {
-       switch (dport->base.port) {
+       switch (dig_port->base.port) {
        case PORT_B:
        case PORT_D:
                return DPIO_CH0;
@@ -1448,9 +1448,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
 }
 
 static inline enum dpio_phy
-vlv_dport_to_phy(struct intel_digital_port *dport)
+vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
 {
-       switch (dport->base.port) {
+       switch (dig_port->base.port) {
        case PORT_B:
        case PORT_C:
                return DPIO_PHY0;
index c9b93c5..d6295eb 100644
@@ -137,14 +137,12 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
  *
  * If a CPU or PCH DP output is attached to an eDP panel, this function
  * will return true, and false otherwise.
- *
- * This function is not safe to use prior to encoder type being set.
  */
 bool intel_dp_is_edp(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 
-       return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+       return dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
 static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -218,10 +216,10 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
 /* Theoretical max between source and sink */
 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       int source_max = intel_dig_port->max_lanes;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       int source_max = dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
-       int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
+       int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
 
        return min3(source_max, sink_max, fia_max);
 }
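The common lane count is simply the minimum of what the source port advertises (max_lanes), what the sink's DPCD reports, and what the Type-C FIA will route; min3() collapses the three limits. The same computation in isolation, as a rough sketch with example values:

    static int common_lane_count(int source_max, int sink_max, int fia_max)
    {
            int common = source_max < sink_max ? source_max : sink_max;

            return common < fia_max ? common : fia_max;
    }

    /* e.g. source_max = 4, sink_max = 2, fia_max = 4  ->  2 common lanes */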
@@ -253,8 +251,8 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 static int
 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;
@@ -780,7 +778,7 @@ static void
 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
@@ -790,14 +788,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        if (drm_WARN(&dev_priv->drm,
                     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
                     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
-                    pipe_name(pipe), intel_dig_port->base.base.base.id,
-                    intel_dig_port->base.base.name))
+                    pipe_name(pipe), dig_port->base.base.base.id,
+                    dig_port->base.base.name))
                return;
 
        drm_dbg_kms(&dev_priv->drm,
                    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
-                   pipe_name(pipe), intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name);
+                   pipe_name(pipe), dig_port->base.base.base.id,
+                   dig_port->base.base.name);
 
        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
@@ -893,7 +891,7 @@ static enum pipe
 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -922,8 +920,8 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
        drm_dbg_kms(&dev_priv->drm,
                    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
                    pipe_name(intel_dp->pps_pipe),
-                   intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name);
+                   dig_port->base.base.base.id,
+                   dig_port->base.base.name);
 
        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1011,8 +1009,8 @@ static void
 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       enum port port = intel_dig_port->base.port;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum port port = dig_port->base.port;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1033,15 +1031,15 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                drm_dbg_kms(&dev_priv->drm,
                            "no initial power sequencer for [ENCODER:%d:%s]\n",
-                           intel_dig_port->base.base.base.id,
-                           intel_dig_port->base.base.name);
+                           dig_port->base.base.base.id,
+                           dig_port->base.base.name);
                return;
        }
 
        drm_dbg_kms(&dev_priv->drm,
                    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
-                   intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name,
+                   dig_port->base.base.base.id,
+                   dig_port->base.base.name,
                    pipe_name(intel_dp->pps_pipe));
 
        intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1306,9 +1304,9 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 aux_clock_divider)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
-                       to_i915(intel_dig_port->base.base.dev);
+                       to_i915(dig_port->base.base.dev);
        u32 precharge, timeout;
 
        if (IS_GEN(dev_priv, 6))
@@ -1336,10 +1334,10 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 unused)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
-                       to_i915(intel_dig_port->base.base.dev);
-       enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+                       to_i915(dig_port->base.base.dev);
+       enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
        u32 ret;
 
        ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1353,7 +1351,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
        if (intel_phy_is_tc(i915, phy) &&
-           intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+           dig_port->tc_mode == TC_PORT_TBT_ALT)
                ret |= DP_AUX_CH_CTL_TBT_IO;
 
        return ret;
@@ -1365,11 +1363,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
                  u8 *recv, int recv_size,
                  u32 aux_send_ctl_flags)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
-                       to_i915(intel_dig_port->base.base.dev);
+                       to_i915(dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
-       enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+       enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
        bool is_tc_port = intel_phy_is_tc(i915, phy);
        i915_reg_t ch_ctl, ch_data[5];
        u32 aux_clock_divider;
@@ -1386,9 +1384,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
                ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
 
        if (is_tc_port)
-               intel_tc_port_lock(intel_dig_port);
+               intel_tc_port_lock(dig_port);
 
-       aux_domain = intel_aux_power_domain(intel_dig_port);
+       aux_domain = intel_aux_power_domain(dig_port);
 
        aux_wakeref = intel_display_power_get(i915, aux_domain);
        pps_wakeref = pps_lock(intel_dp);
@@ -1547,7 +1545,7 @@ out:
        intel_display_power_put_async(i915, aux_domain, aux_wakeref);
 
        if (is_tc_port)
-               intel_tc_port_unlock(intel_dig_port);
+               intel_tc_port_unlock(dig_port);
 
        return ret;
 }
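On Type-C PHYs the whole AUX transfer is bracketed by the TC port lock and a reference on the port's AUX power domain, both now taken through dig_port. Reduced to its shape (error paths and the eDP panel-power lock taken in the real function are elided; purely illustrative):

    if (is_tc_port)
            intel_tc_port_lock(dig_port);

    aux_domain = intel_aux_power_domain(dig_port);
    aux_wakeref = intel_display_power_get(i915, aux_domain);

    /* ... perform the AUX transaction ... */

    intel_display_power_put_async(i915, aux_domain, aux_wakeref);

    if (is_tc_port)
            intel_tc_port_unlock(dig_port);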
@@ -2893,7 +2891,7 @@ static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2910,11 +2908,11 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
                return need_to_disable;
 
        intel_display_power_get(dev_priv,
-                               intel_aux_power_domain(intel_dig_port));
+                               intel_aux_power_domain(dig_port));
 
        drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
-                   intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name);
+                   dig_port->base.base.base.id,
+                   dig_port->base.base.name);
 
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);
@@ -2936,8 +2934,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (!edp_have_panel_power(intel_dp)) {
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] panel power wasn't enabled\n",
-                           intel_dig_port->base.base.base.id,
-                           intel_dig_port->base.base.name);
+                           dig_port->base.base.base.id,
+                           dig_port->base.base.name);
                msleep(intel_dp->panel_power_up_delay);
        }
 
@@ -2970,7 +2968,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       struct intel_digital_port *intel_dig_port =
+       struct intel_digital_port *dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2983,8 +2981,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
                return;
 
        drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
-                   intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name);
+                   dig_port->base.base.base.id,
+                   dig_port->base.base.name);
 
        pp = ilk_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;
@@ -3004,7 +3002,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
                intel_dp->panel_power_off_time = ktime_get_boottime();
 
        intel_display_power_put_unchecked(dev_priv,
-                                         intel_aux_power_domain(intel_dig_port));
+                                         intel_aux_power_domain(dig_port));
 }
 
 static void edp_panel_vdd_work(struct work_struct *__work)
@@ -3835,8 +3833,8 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 
@@ -3858,8 +3856,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
         */
        drm_dbg_kms(&dev_priv->drm,
                    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
-                   pipe_name(pipe), intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name);
+                   pipe_name(pipe), dig_port->base.base.base.id,
+                   dig_port->base.base.name);
        intel_de_write(dev_priv, pp_on_reg, 0);
        intel_de_posting_read(dev_priv, pp_on_reg);
 
@@ -4925,7 +4923,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
                               const struct intel_crtc_state *crtc_state,
                               unsigned int type)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct dp_sdp sdp = {};
        ssize_t len;
@@ -4951,14 +4949,14 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
        if (drm_WARN_ON(&dev_priv->drm, len < 0))
                return;
 
-       intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+       dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
 }
 
 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
                            const struct intel_crtc_state *crtc_state,
                            struct drm_dp_vsc_sdp *vsc)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct dp_sdp sdp = {};
        ssize_t len;
@@ -4968,7 +4966,7 @@ void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
        if (drm_WARN_ON(&dev_priv->drm, len < 0))
                return;
 
-       intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+       dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
                                        &sdp, len);
 }
 
@@ -5128,7 +5126,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
                                  struct intel_crtc_state *crtc_state,
                                  struct drm_dp_vsc_sdp *vsc)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        unsigned int type = DP_SDP_VSC;
@@ -5143,7 +5141,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
             intel_hdmi_infoframe_enable(type)) == 0)
                return;
 
-       intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+       dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
 
        ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
 
@@ -5155,7 +5153,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
                                                     struct intel_crtc_state *crtc_state,
                                                     struct hdmi_drm_infoframe *drm_infoframe)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
        struct dp_sdp sdp = {};
@@ -5165,8 +5163,8 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
            intel_hdmi_infoframe_enable(type)) == 0)
                return;
 
-       intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
-                                      sizeof(sdp));
+       dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+                                sizeof(sdp));
 
        ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
                                                         sizeof(sdp));
@@ -5368,10 +5366,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv =
                        to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_dp_phy_test_params *data =
                        &intel_dp->compliance.test_data.phytest;
-       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum pipe pipe = crtc->pipe;
        u32 pattern_val;
 
@@ -5433,10 +5431,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
 static void
 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum pipe pipe = crtc->pipe;
        u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
 
@@ -5459,11 +5457,11 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
 static void
 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       enum port port = intel_dig_port->base.port;
-       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       enum port port = dig_port->base.port;
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum pipe pipe = crtc->pipe;
        u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
 
@@ -6334,10 +6332,10 @@ intel_dp_connector_unregister(struct drm_connector *connector)
 
 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+       struct intel_dp *intel_dp = &dig_port->dp;
 
-       intel_dp_mst_encoder_cleanup(intel_dig_port);
+       intel_dp_mst_encoder_cleanup(dig_port);
        if (intel_dp_is_edp(intel_dp)) {
                intel_wakeref_t wakeref;
 
@@ -6396,11 +6394,11 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
 }
 
 static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
                                u8 *an)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
        static const struct drm_dp_aux_msg msg = {
                .request = DP_AUX_NATIVE_WRITE,
                .address = DP_AUX_HDCP_AKSV,
@@ -6411,7 +6409,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        int ret;
 
        /* Output An first, that's easy */
-       dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+       dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
                                     an, DRM_HDCP_AN_LEN);
        if (dpcd_ret != DRM_HDCP_AN_LEN) {
                drm_dbg_kms(&i915->drm,
@@ -6450,13 +6448,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        return 0;
 }
 
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
                                   u8 *bksv)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
                               DRM_HDCP_KSV_LEN);
        if (ret != DRM_HDCP_KSV_LEN) {
                drm_dbg_kms(&i915->drm,
@@ -6466,10 +6464,10 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
        return 0;
 }
 
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
                                      u8 *bstatus)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
        /*
@@ -6477,7 +6475,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
         * definition by different names. In the HDMI spec, it's called BSTATUS,
         * but in DP it's called BINFO.
         */
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
                               bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret != DRM_HDCP_BSTATUS_LEN) {
                drm_dbg_kms(&i915->drm,
@@ -6488,13 +6486,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
                             u8 *bcaps)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
                               bcaps, 1);
        if (ret != 1) {
                drm_dbg_kms(&i915->drm,
@@ -6506,13 +6504,13 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
                                   bool *repeater_present)
 {
        ssize_t ret;
        u8 bcaps;
 
-       ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
        if (ret)
                return ret;
 
@@ -6521,13 +6519,13 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
                                u8 *ri_prime)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
                               ri_prime, DRM_HDCP_RI_LEN);
        if (ret != DRM_HDCP_RI_LEN) {
                drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
@@ -6538,14 +6536,14 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
                                 bool *ksv_ready)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
        u8 bstatus;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
                drm_dbg_kms(&i915->drm,
@@ -6557,17 +6555,17 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
                                int num_downstream, u8 *ksv_fifo)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
        int i;
 
        /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
        for (i = 0; i < num_downstream; i += 3) {
                size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
-               ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+               ret = drm_dp_dpcd_read(&dig_port->dp.aux,
                                       DP_AUX_HDCP_KSV_FIFO,
                                       ksv_fifo + i * DRM_HDCP_KSV_LEN,
                                       len);
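A single AUX read of DP_AUX_HDCP_KSV_FIFO returns at most a 15-byte window, i.e. three 5-byte KSVs, which is why the loop above advances three downstream devices at a time. The chunking arithmetic on its own (KSV_LEN is an assumed stand-in for DRM_HDCP_KSV_LEN, which is 5):

    #define KSV_LEN 5  /* stand-in for DRM_HDCP_KSV_LEN */

    for (i = 0; i < num_downstream; i += 3) {
            size_t len = (num_downstream - i < 3 ? num_downstream - i : 3) * KSV_LEN;
            /* read 'len' bytes (<= 15) into ksv_fifo + i * KSV_LEN */
    }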
@@ -6582,16 +6580,16 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
                                    int i, u32 *part)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
        if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
                return -EINVAL;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
                               DP_AUX_HDCP_V_PRIME(i), part,
                               DRM_HDCP_V_PRIME_PART_LEN);
        if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
@@ -6603,7 +6601,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
                                    bool enable)
 {
        /* Not used for single stream DisplayPort setups */
@@ -6611,13 +6609,13 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 }
 
 static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
        u8 bstatus;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
                               &bstatus, 1);
        if (ret != 1) {
                drm_dbg_kms(&i915->drm,
@@ -6629,13 +6627,13 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
                          bool *hdcp_capable)
 {
        ssize_t ret;
        u8 bcaps;
 
-       ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
        if (ret)
                return ret;
 
@@ -6693,13 +6691,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
 };
 
 static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
                              u8 *rx_status)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
                               DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
                               HDCP_2_2_DP_RXSTATUS_LEN);
        if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
@@ -6712,14 +6710,14 @@ intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
                                  u8 msg_id, bool *msg_ready)
 {
        u8 rx_status;
        int ret;
 
        *msg_ready = false;
-       ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
        if (ret < 0)
                return ret;
 
@@ -6745,11 +6743,11 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
 }
 
 static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
                            const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_dp *dp = &intel_dig_port->dp;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *dp = &dig_port->dp;
        struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        u8 msg_id = hdcp2_msg_data->msg_id;
        int ret, timeout;
@@ -6773,7 +6771,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
                 * the timeout at wait for CP_IRQ.
                 */
                intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
-               ret = hdcp2_detect_msg_availability(intel_dig_port,
+               ret = hdcp2_detect_msg_availability(dig_port,
                                                    msg_id, &msg_ready);
                if (!msg_ready)
                        ret = -ETIMEDOUT;
@@ -6799,10 +6797,10 @@ static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
 }
 
 static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
                             void *buf, size_t size)
 {
-       struct intel_dp *dp = &intel_dig_port->dp;
+       struct intel_dp *dp = &dig_port->dp;
        struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        unsigned int offset;
        u8 *byte = buf;
@@ -6825,7 +6823,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
                len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
                                DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
 
-               ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+               ret = drm_dp_dpcd_write(&dig_port->dp.aux,
                                        offset, (void *)byte, len);
                if (ret < 0)
                        return ret;
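HDCP2.2 messages can exceed a single AUX transaction, so the write path splits the buffer into DP_AUX_MAX_PAYLOAD_BYTES-sized pieces (16 bytes per native AUX write). The same splitting, stripped of the DPCD details (AUX_MAX is an assumed stand-in for the real constant):

    #define AUX_MAX 16  /* stand-in for DP_AUX_MAX_PAYLOAD_BYTES */

    size_t written = 0;
    while (written < size) {
            size_t len = size - written > AUX_MAX ? AUX_MAX : size - written;
            /* write 'len' bytes from buf + written at offset + written */
            written += len;
    }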
@@ -6839,13 +6837,13 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
 {
        u8 rx_info[HDCP_2_2_RXINFO_LEN];
        u32 dev_cnt;
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
                               DP_HDCP_2_2_REG_RXINFO_OFFSET,
                               (void *)rx_info, HDCP_2_2_RXINFO_LEN);
        if (ret != HDCP_2_2_RXINFO_LEN)
@@ -6865,10 +6863,10 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
                            u8 msg_id, void *buf, size_t size)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_recv, len;
@@ -6879,12 +6877,12 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
                return -EINVAL;
        offset = hdcp2_msg_data->offset;
 
-       ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+       ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
        if (ret < 0)
                return ret;
 
        if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
-               ret = get_receiver_id_list_size(intel_dig_port);
+               ret = get_receiver_id_list_size(dig_port);
                if (ret < 0)
                        return ret;
 
@@ -6899,7 +6897,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
                len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
                      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
 
-               ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+               ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
                                       (void *)byte, len);
                if (ret < 0) {
                        drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
@@ -6918,7 +6916,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
                                      bool is_repeater, u8 content_type)
 {
        int ret;
@@ -6937,7 +6935,7 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
        stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
        stream_type_msg.stream_type = content_type;
 
-       ret =  intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+       ret =  intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
                                        sizeof(stream_type_msg));
 
        return ret < 0 ? ret : 0;
@@ -6945,12 +6943,12 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
 {
        u8 rx_status;
        int ret;
 
-       ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+       ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
        if (ret)
                return ret;
 
@@ -6965,14 +6963,14 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
                           bool *capable)
 {
        u8 rx_caps[3];
        int ret;
 
        *capable = false;
-       ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+       ret = drm_dp_dpcd_read(&dig_port->dp.aux,
                               DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
                               rx_caps, HDCP_2_2_RXCAPS_LEN);
        if (ret != HDCP_2_2_RXCAPS_LEN)
@@ -7251,12 +7249,12 @@ static bool intel_edp_have_power(struct intel_dp *intel_dp)
 }
 
 enum irqreturn
-intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *intel_dp = &dig_port->dp;
 
-       if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+       if (dig_port->base.type == INTEL_OUTPUT_EDP &&
            (long_hpd || !intel_edp_have_power(intel_dp))) {
                /*
                 * vdd off can generate a long/short pulse on eDP which
@@ -7267,14 +7265,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                drm_dbg_kms(&i915->drm,
                            "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
                            long_hpd ? "long" : "short",
-                           intel_dig_port->base.base.base.id,
-                           intel_dig_port->base.base.name);
+                           dig_port->base.base.base.id,
+                           dig_port->base.base.name);
                return IRQ_HANDLED;
        }
 
        drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
-                   intel_dig_port->base.base.base.id,
-                   intel_dig_port->base.base.name,
+                   dig_port->base.base.base.id,
+                   dig_port->base.base.name,
                    long_hpd ? "long" : "short");
 
        if (long_hpd) {
@@ -8137,12 +8135,12 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
 }
 
 bool
-intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+intel_dp_init_connector(struct intel_digital_port *dig_port,
                        struct intel_connector *intel_connector)
 {
        struct drm_connector *connector = &intel_connector->base;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct intel_dp *intel_dp = &dig_port->dp;
+       struct intel_encoder *intel_encoder = &dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
@@ -8153,12 +8151,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        INIT_WORK(&intel_connector->modeset_retry_work,
                  intel_dp_modeset_retry_work_fn);
 
-       if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+       if (drm_WARN(dev, dig_port->max_lanes < 1,
                     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
-                    intel_dig_port->max_lanes, intel_encoder->base.base.id,
+                    dig_port->max_lanes, intel_encoder->base.base.id,
                     intel_encoder->base.name))
                return false;
 
+       intel_dp_set_source_rates(intel_dp);
+
        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;
@@ -8174,22 +8174,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                 */
                drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
                type = DRM_MODE_CONNECTOR_eDP;
-               intel_encoder->type = INTEL_OUTPUT_EDP;
-
-               /* eDP only on port B and/or C on vlv/chv */
-               if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
-                                     IS_CHERRYVIEW(dev_priv)) &&
-                               port != PORT_B && port != PORT_C))
-                       return false;
        } else {
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }
 
-       intel_dp_set_source_rates(intel_dp);
-
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);
 
+       /*
+        * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+        * for DP the encoder type can be set by the caller to
+        * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+        */
+       if (type == DRM_MODE_CONNECTOR_eDP)
+               intel_encoder->type = INTEL_OUTPUT_EDP;
+
+       /* eDP only on port B and/or C on vlv/chv */
+       if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+                             IS_CHERRYVIEW(dev_priv)) &&
+                       intel_dp_is_edp(intel_dp) &&
+                       port != PORT_B && port != PORT_C))
+               return false;
+
        drm_dbg_kms(&dev_priv->drm,
                    "Adding %s connector on [ENCODER:%d:%s]\n",
                    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
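The reordering above calls intel_dp_set_source_rates() before anything depends on the connector type, only overrides the encoder type for eDP (a DDI caller may pass INTEL_OUTPUT_UNKNOWN for DP and must keep it), and rephrases the vlv/chv restriction so it only rejects eDP outside ports B and C. A compressed sketch of the new flow (port_is_edp is a placeholder for the surrounding eDP-port check, not the actual condition):

    type = port_is_edp ? DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort;

    if (type == DRM_MODE_CONNECTOR_eDP)
            intel_encoder->type = INTEL_OUTPUT_EDP;  /* DP keeps the caller's type */

    /* eDP is only wired to ports B/C on vlv/chv, now checked via intel_dp_is_edp() */
    if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
        intel_dp_is_edp(intel_dp) && port != PORT_B && port != PORT_C)
            return false;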
@@ -8218,12 +8224,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                intel_connector->get_hw_state = intel_connector_get_hw_state;
 
        /* init MST on ports that can support it */
-       intel_dp_mst_encoder_init(intel_dig_port,
+       intel_dp_mst_encoder_init(dig_port,
                                  intel_connector->base.base.id);
 
        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
-               intel_dp_mst_encoder_cleanup(intel_dig_port);
+               intel_dp_mst_encoder_cleanup(dig_port);
                goto fail;
        }
 
@@ -8258,20 +8264,20 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
 {
-       struct intel_digital_port *intel_dig_port;
+       struct intel_digital_port *dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;
 
-       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
-       if (!intel_dig_port)
+       dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+       if (!dig_port)
                return false;
 
        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;
 
-       intel_encoder = &intel_dig_port->base;
+       intel_encoder = &dig_port->base;
        encoder = &intel_encoder->base;
 
        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -8307,34 +8313,34 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 
        if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
            (HAS_PCH_CPT(dev_priv) && port != PORT_A))
-               intel_dig_port->dp.set_link_train = cpt_set_link_train;
+               dig_port->dp.set_link_train = cpt_set_link_train;
        else
-               intel_dig_port->dp.set_link_train = g4x_set_link_train;
+               dig_port->dp.set_link_train = g4x_set_link_train;
 
        if (IS_CHERRYVIEW(dev_priv))
-               intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+               dig_port->dp.set_signal_levels = chv_set_signal_levels;
        else if (IS_VALLEYVIEW(dev_priv))
-               intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+               dig_port->dp.set_signal_levels = vlv_set_signal_levels;
        else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
-               intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+               dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
        else if (IS_GEN(dev_priv, 6) && port == PORT_A)
-               intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+               dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
        else
-               intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+               dig_port->dp.set_signal_levels = g4x_set_signal_levels;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
            (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
-               intel_dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3;
-               intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+               dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3;
+               dig_port->dp.voltage_max = intel_dp_voltage_max_3;
        } else {
-               intel_dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2;
-               intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+               dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2;
+               dig_port->dp.voltage_max = intel_dp_voltage_max_2;
        }
 
-       intel_dig_port->dp.output_reg = output_reg;
-       intel_dig_port->max_lanes = 4;
-       intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
-       intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+       dig_port->dp.output_reg = output_reg;
+       dig_port->max_lanes = 4;
+       dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+       dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
 
        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -8349,25 +8355,25 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;
 
-       intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+       dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
        if (HAS_GMCH(dev_priv)) {
                if (IS_GM45(dev_priv))
-                       intel_dig_port->connected = gm45_digital_port_connected;
+                       dig_port->connected = gm45_digital_port_connected;
                else
-                       intel_dig_port->connected = g4x_digital_port_connected;
+                       dig_port->connected = g4x_digital_port_connected;
        } else {
                if (port == PORT_A)
-                       intel_dig_port->connected = ilk_digital_port_connected;
+                       dig_port->connected = ilk_digital_port_connected;
                else
-                       intel_dig_port->connected = ibx_digital_port_connected;
+                       dig_port->connected = ibx_digital_port_connected;
        }
 
        if (port != PORT_A)
-               intel_infoframe_init(intel_dig_port);
+               intel_infoframe_init(dig_port);
 
-       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
-       if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+       dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+       if (!intel_dp_init_connector(dig_port, intel_connector))
                goto err_init_connector;
 
        return true;
@@ -8377,7 +8383,7 @@ err_init_connector:
 err_encoder_init:
        kfree(intel_connector);
 err_connector_alloc:
-       kfree(intel_dig_port);
+       kfree(dig_port);
        return false;
 }
 
index 0a8950f..b901ab8 100644
@@ -40,7 +40,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
                           enum pipe *pipe);
 bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
                   enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+bool intel_dp_init_connector(struct intel_digital_port *dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              int link_rate, u8 lane_count,
@@ -61,7 +61,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
                            struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct intel_dp *intel_dp);
 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
                                  bool long_hpd);
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state);
index 2493142..a23ed72 100644
@@ -52,6 +52,7 @@ static u8 dp_voltage_max(u8 preemph)
 void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
                               const u8 link_status[DP_LINK_STATUS_SIZE])
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 v = 0;
        u8 p = 0;
        int lane;
@@ -64,12 +65,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
        }
 
        preemph_max = intel_dp->preemph_max(intel_dp);
+       drm_WARN_ON_ONCE(&i915->drm,
+                        preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
+                        preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
+
        if (p >= preemph_max)
                p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        v = min(v, dp_voltage_max(p));
 
        voltage_max = intel_dp->voltage_max(intel_dp);
+       drm_WARN_ON_ONCE(&i915->drm,
+                        voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
+                        voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
+
        if (v >= voltage_max)
                v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
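The new drm_WARN_ON_ONCE checks document the contract that the platform vfuncs only ever report level 2 or level 3 as their maximum; the existing code then clamps the value requested by the sink and sets the *_REACHED flag once the cap is hit. The clamp by itself, as an illustrative helper:

    static u8 clamp_train_level(u8 requested, u8 max, u8 reached_flag)
    {
            if (requested >= max)
                    return max | reached_flag;

            return requested;
    }

    /* used with DP_TRAIN_MAX_PRE_EMPHASIS_REACHED / DP_TRAIN_MAX_SWING_REACHED */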
 
index 8273f2e..a2d91a4 100644
@@ -342,8 +342,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
                                 const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &dig_port->dp;
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -369,8 +369,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
                                      const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &dig_port->dp;
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -421,7 +421,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
         * the transcoder clock select is set to none.
         */
        if (last_mst_stream)
-               intel_dp_set_infoframes(&intel_dig_port->base, false,
+               intel_dp_set_infoframes(&dig_port->base, false,
                                        old_crtc_state, NULL);
        /*
         * From TGL spec: "If multi-stream slave transcoder: Configure
@@ -436,7 +436,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 
        intel_mst->connector = NULL;
        if (last_mst_stream)
-               intel_dig_port->base.post_disable(state, &intel_dig_port->base,
+               dig_port->base.post_disable(state, &dig_port->base,
                                                  old_crtc_state, NULL);
 
        drm_dbg_kms(&dev_priv->drm, "active links %d\n",
@@ -449,11 +449,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
                                        const struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &dig_port->dp;
 
        if (intel_dp->active_mst_links == 0)
-               intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
+               dig_port->base.pre_pll_enable(state, &dig_port->base,
                                                    pipe_config, NULL);
 }
 
@@ -463,8 +463,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
                                    const struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
@@ -490,7 +490,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
        drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
 
        if (first_mst_stream)
-               intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
+               dig_port->base.pre_enable(state, &dig_port->base,
                                                pipe_config, NULL);
 
        ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -506,7 +506,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
 
        /*
         * Before Gen 12 this is not done as part of
-        * intel_dig_port->base.pre_enable() and should be done here. For
+        * dig_port->base.pre_enable() and should be done here. For
         * Gen 12+ the step in which this should be done is different for the
         * first MST stream, so it's done on the DDI for the first stream and
         * here for the following ones.
@@ -525,8 +525,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
                                const struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_digital_port *dig_port = intel_mst->primary;
+       struct intel_dp *intel_dp = &dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val;
 
@@ -572,9 +572,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
                                        struct intel_crtc_state *pipe_config)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
+       struct intel_digital_port *dig_port = intel_mst->primary;
 
-       intel_ddi_get_config(&intel_dig_port->base, pipe_config);
+       intel_ddi_get_config(&dig_port->base, pipe_config);
 }
 
 static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -639,39 +639,60 @@ static int intel_dp_mst_get_modes(struct drm_connector *connector)
        return intel_dp_mst_get_ddc_modes(connector);
 }
 
-static enum drm_mode_status
-intel_dp_mst_mode_valid(struct drm_connector *connector,
-                       struct drm_display_mode *mode)
+static int
+intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+                           struct drm_display_mode *mode,
+                           struct drm_modeset_acquire_ctx *ctx,
+                           enum drm_mode_status *status)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
+       struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+       struct drm_dp_mst_port *port = intel_connector->port;
+       const int min_bpp = 18;
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        int max_rate, mode_rate, max_lanes, max_link_clock;
+       int ret;
 
-       if (drm_connector_is_unregistered(connector))
-               return MODE_ERROR;
+       if (drm_connector_is_unregistered(connector)) {
+               *status = MODE_ERROR;
+               return 0;
+       }
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+               *status = MODE_NO_DBLESCAN;
+               return 0;
+       }
 
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-       mode_rate = intel_dp_link_required(mode->clock, 18);
+       mode_rate = intel_dp_link_required(mode->clock, min_bpp);
 
-       /* TODO - validate mode against available PBN for link */
-       if (mode->clock < 10000)
-               return MODE_CLOCK_LOW;
+       ret = drm_modeset_lock(&mgr->base.lock, ctx);
+       if (ret)
+               return ret;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
-               return MODE_H_ILLEGAL;
+       if (mode_rate > max_rate || mode->clock > max_dotclk ||
+           drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+               *status = MODE_CLOCK_HIGH;
+               return 0;
+       }
+
+       if (mode->clock < 10000) {
+               *status = MODE_CLOCK_LOW;
+               return 0;
+       }
 
-       if (mode_rate > max_rate || mode->clock > max_dotclk)
-               return MODE_CLOCK_HIGH;
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               *status = MODE_H_ILLEGAL;
+               return 0;
+       }
 
-       return intel_mode_valid_max_plane_size(dev_priv, mode);
+       *status = intel_mode_valid_max_plane_size(dev_priv, mode);
+       return 0;
 }
 
 static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -700,7 +721,7 @@ intel_dp_mst_detect(struct drm_connector *connector,
 
 static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
        .get_modes = intel_dp_mst_get_modes,
-       .mode_valid = intel_dp_mst_mode_valid,
+       .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
        .atomic_best_encoder = intel_mst_atomic_best_encoder,
        .atomic_check = intel_dp_mst_atomic_check,
        .detect_ctx = intel_dp_mst_detect,
@@ -732,8 +753,8 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
 static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
 {
        struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
@@ -808,11 +829,11 @@ static const struct drm_dp_mst_topology_cbs mst_cbs = {
 };
 
 static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
 {
        struct intel_dp_mst_encoder *intel_mst;
        struct intel_encoder *intel_encoder;
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_device *dev = dig_port->base.base.dev;
 
        intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
 
@@ -821,14 +842,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 
        intel_mst->pipe = pipe;
        intel_encoder = &intel_mst->base;
-       intel_mst->primary = intel_dig_port;
+       intel_mst->primary = dig_port;
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
                         DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
 
        intel_encoder->type = INTEL_OUTPUT_DP_MST;
-       intel_encoder->power_domain = intel_dig_port->base.power_domain;
-       intel_encoder->port = intel_dig_port->base.port;
+       intel_encoder->power_domain = dig_port->base.power_domain;
+       intel_encoder->port = dig_port->base.port;
        intel_encoder->cloneable = 0;
        /*
         * This is wrong, but broken userspace uses the intersection
@@ -855,29 +876,29 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 }
 
 static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
 {
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+       struct intel_dp *intel_dp = &dig_port->dp;
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe)
-               intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
+               intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
        return true;
 }
 
 int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
 {
-       return intel_dig_port->dp.active_mst_links;
+       return dig_port->dp.active_mst_links;
 }
 
 int
-intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       enum port port = intel_dig_port->base.port;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_dp *intel_dp = &dig_port->dp;
+       enum port port = dig_port->base.port;
        int ret;
 
        if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
@@ -892,7 +913,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
        intel_dp->mst_mgr.cbs = &mst_cbs;
 
        /* create encoders */
-       intel_dp_create_fake_mst_encoders(intel_dig_port);
+       intel_dp_create_fake_mst_encoders(dig_port);
        ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
                                           &intel_dp->aux, 16, 3, conn_base_id);
        if (ret)
@@ -904,9 +925,9 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
 }
 
 void
-intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
 {
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_dp *intel_dp = &dig_port->dp;
 
        if (!intel_dp->can_mst)
                return;
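
The intel_dp_mst.c hunks above move the MST connector from the .mode_valid hook to the .mode_valid_ctx helper hook, which receives a drm_modeset_acquire_ctx so the PBN check against port->full_pbn can run under the topology manager's lock: the function returns a negative error (for example -EDEADLK when the acquire context is contended) and otherwise reports the verdict through the *status out-parameter. Below is a minimal, standalone sketch of that calling convention using toy types and a stub lock; it is an illustration of the pattern, not the i915 or DRM core code.

#include <stdio.h>
#include <errno.h>

/* Toy stand-ins for the DRM types involved; the real hook is
 * drm_connector_helper_funcs.mode_valid_ctx. */
enum mode_status { MODE_OK, MODE_CLOCK_LOW, MODE_CLOCK_HIGH };

struct acquire_ctx { int contended; };

/* Stub for drm_modeset_lock(): fails with -EDEADLK when contended. */
static int modeset_lock(struct acquire_ctx *ctx)
{
	return ctx->contended ? -EDEADLK : 0;
}

/* The .mode_valid_ctx convention: negative errno means "could not even
 * check" (the caller backs off and retries); 0 means the check ran and
 * the result is in *status. */
static int mode_valid_ctx(int clock_khz, int max_dotclk,
			  struct acquire_ctx *ctx, enum mode_status *status)
{
	int ret = modeset_lock(ctx);

	if (ret)
		return ret;

	if (clock_khz < 10000)
		*status = MODE_CLOCK_LOW;
	else if (clock_khz > max_dotclk)
		*status = MODE_CLOCK_HIGH;
	else
		*status = MODE_OK;

	return 0;
}

int main(void)
{
	struct acquire_ctx ctx = { .contended = 0 };
	enum mode_status status;
	int ret = mode_valid_ctx(148500, 300000, &ctx, &status);

	printf("ret=%d status=%d\n", ret, status);
	return 0;
}
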
index 854724f..6afda4e 100644 (file)
@@ -11,9 +11,9 @@
 struct intel_digital_port;
 struct intel_crtc_state;
 
-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
 bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
 bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
 
index 399a7ed..7910522 100644 (file)
@@ -650,9 +650,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
                              bool uniq_trans_scale)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;
        int i;
@@ -746,7 +746,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                              bool reset)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+       enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum pipe pipe = crtc->pipe;
        u32 val;
@@ -789,10 +789,10 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
                            const struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -803,7 +803,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
-               dport->release_cl2_override =
+               dig_port->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 
        chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -870,10 +870,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = crtc->pipe;
        int data, i, stagger;
        u32 val;
@@ -948,12 +948,12 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 
 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (dport->release_cl2_override) {
+       if (dig_port->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
-               dport->release_cl2_override = false;
+               dig_port->release_cl2_override = false;
        }
 }
 
@@ -997,8 +997,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = intel_crtc->pipe;
 
        vlv_dpio_get(dev_priv);
@@ -1022,10 +1022,10 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
                            const struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = crtc->pipe;
 
        /* Program Tx lane resets to default */
@@ -1052,10 +1052,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = crtc->pipe;
        u32 val;
 
@@ -1081,10 +1081,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
                         const struct intel_crtc_state *old_crtc_state)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
        enum pipe pipe = crtc->pipe;
 
        vlv_dpio_get(dev_priv);
index 5cd0903..307ed8a 100644 (file)
@@ -324,6 +324,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        const struct drm_display_mode *fixed_mode =
                to_intel_connector(connector)->panel.fixed_mode;
+       int num_modes;
 
        /*
         * We should probably have an i2c driver get_modes function for those
@@ -331,21 +332,22 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
         * (TV-out, for example), but for now with just TMDS and LVDS,
         * that's not the case.
         */
-       intel_ddc_get_modes(connector,
-                           intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
-       if (!list_empty(&connector->probed_modes))
-               return 1;
+       num_modes = intel_ddc_get_modes(connector,
+                                       intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+       if (num_modes)
+               return num_modes;
 
        if (fixed_mode) {
                struct drm_display_mode *mode;
+
                mode = drm_mode_duplicate(connector->dev, fixed_mode);
                if (mode) {
                        drm_mode_probed_add(connector, mode);
-                       return 1;
+                       num_modes++;
                }
        }
 
-       return 0;
+       return num_modes;
 }
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
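
The intel_dvo_get_modes() hunk above makes the ->get_modes() hook return the number of modes it actually added (the count from intel_ddc_get_modes(), or the count after duplicating the panel's fixed mode) instead of a hard-coded 0/1. A tiny standalone sketch of that counting pattern, with stand-in helpers rather than the driver's functions:

#include <stdio.h>

/* Stand-in for intel_ddc_get_modes(): how many modes EDID probing added. */
static int ddc_get_modes(int edid_modes) { return edid_modes; }

/* get_modes() reports the real count instead of a hard-coded 0/1. */
static int get_modes(int edid_modes, int have_fixed_mode)
{
	int num_modes = ddc_get_modes(edid_modes);

	if (num_modes)
		return num_modes;

	if (have_fixed_mode)
		num_modes++;	/* duplicated the panel's fixed mode */

	return num_modes;
}

int main(void)
{
	printf("%d %d %d\n", get_modes(3, 0), get_modes(0, 1), get_modes(0, 0));
	return 0;
}
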
index 69a0682..85723fb 100644 (file)
@@ -187,8 +187,30 @@ static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
        return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+       spin_lock_irq(&dev_priv->uncore.lock);
+       intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+                         intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+       spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+       enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+       spin_lock_irq(&dev_priv->uncore.lock);
+       intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+                         intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+       spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
 /* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc *fbc = &dev_priv->fbc;
 
@@ -198,6 +220,16 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
        intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
 }
 
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) >= 6)
+               snb_fbc_recompress(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 4)
+               i965_fbc_recompress(dev_priv);
+       else
+               i8xx_fbc_recompress(dev_priv);
+}
+
 static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
@@ -315,21 +347,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 
-       if (IS_IVYBRIDGE(dev_priv)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:ivb */
-               intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
-                              intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
-       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-               intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
-                              intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
-       }
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               /* Wa_1409120013:icl,ehl,tgl */
-               intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
-                              ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
        intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        intel_fbc_recompress(dev_priv);
@@ -695,9 +712,13 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
 
        cache->fb.format = fb->format;
-       cache->fb.stride = fb->pitches[0];
        cache->fb.modifier = fb->modifier;
 
+       /* FIXME is this correct? */
+       cache->fb.stride = plane_state->color_plane[0].stride;
+       if (drm_rotation_90_or_270(plane_state->hw.rotation))
+               cache->fb.stride *= fb->format->cpp[0];
+
        /* FBC1 compression interval: arbitrary choice of 1 second */
        cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
 
@@ -721,6 +742,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
                fbc->compressed_fb.size * fbc->threshold;
 }
 
+static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+       if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+           cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
+               return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+       else
+               return 0;
+}
+
+static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
+}
+
 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc *fbc = &dev_priv->fbc;
@@ -797,6 +837,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
+       if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+               fbc->no_fbc_reason = "pixel format is invalid";
+               return false;
+       }
+
        if (!rotation_is_valid(dev_priv, cache->fb.format->format,
                               cache->plane.rotation)) {
                fbc->no_fbc_reason = "rotation unsupported";
@@ -813,11 +858,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
-               fbc->no_fbc_reason = "pixel format is invalid";
-               return false;
-       }
-
        if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
            cache->fb.format->has_alpha) {
                fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
@@ -881,6 +921,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
        params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
 
        params->fb.format = cache->fb.format;
+       params->fb.modifier = cache->fb.modifier;
        params->fb.stride = cache->fb.stride;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
@@ -910,6 +951,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
        if (params->fb.format != cache->fb.format)
                return false;
 
+       if (params->fb.modifier != cache->fb.modifier)
+               return false;
+
        if (params->fb.stride != cache->fb.stride)
                return false;
 
@@ -1197,7 +1241,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
 
        if (fbc->crtc) {
                if (fbc->crtc != crtc ||
-                   !intel_fbc_cfb_size_changed(dev_priv))
+                   (!intel_fbc_cfb_size_changed(dev_priv) &&
+                    !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
                        goto out;
 
                __intel_fbc_disable(dev_priv);
@@ -1219,12 +1264,7 @@ void intel_fbc_enable(struct intel_atomic_state *state,
                goto out;
        }
 
-       if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
-           plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
-               cache->gen9_wa_cfb_stride =
-                       DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
-       else
-               cache->gen9_wa_cfb_stride = 0;
+       cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);
 
        drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
                    pipe_name(crtc->pipe));
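
The intel_fbc.c hunks above split CFB recompression into per-generation helpers (the i8xx/i965 paths rewrite the plane address register under the uncore lock to trigger a nuke, SNB+ keeps the MSG_FBC_REND_STATE write) and route them through a single intel_fbc_recompress() dispatcher; they also move the gen9 workaround CFB stride computation into a helper so a change in that value forces FBC to be disabled and re-enabled. Here is a standalone toy sketch of the dispatch-by-generation pattern with hypothetical names; it only models the structure, not the register programming.

#include <stdio.h>

/* Hypothetical device description; the driver keys off INTEL_GEN(). */
struct toy_device { int gen; };

static void i8xx_recompress(struct toy_device *dev) { printf("gen%d: rewrite plane address reg\n", dev->gen); }
static void i965_recompress(struct toy_device *dev) { printf("gen%d: rewrite plane surface reg\n", dev->gen); }
static void snb_recompress(struct toy_device *dev)  { printf("gen%d: render nuke message\n", dev->gen); }

/* One entry point picks the right per-generation helper. */
static void recompress(struct toy_device *dev)
{
	if (dev->gen >= 6)
		snb_recompress(dev);
	else if (dev->gen >= 4)
		i965_recompress(dev);
	else
		i8xx_recompress(dev);
}

int main(void)
{
	struct toy_device devs[] = { { 3 }, { 4 }, { 9 } };

	for (int i = 0; i < 3; i++)
		recompress(&devs[i]);
	return 0;
}
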
index 815b054..89a4d29 100644 (file)
@@ -40,15 +40,15 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
 }
 
 static
-int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
                               const struct intel_hdcp_shim *shim, u8 *bksv)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int ret, i, tries = 2;
 
        /* HDCP spec states that we must retry the bksv if it is invalid */
        for (i = 0; i < tries; i++) {
-               ret = shim->read_bksv(intel_dig_port, bksv);
+               ret = shim->read_bksv(dig_port, bksv);
                if (ret)
                        return ret;
                if (intel_hdcp_is_ksv_valid(bksv))
@@ -65,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
 /* Is HDCP1.4 capable on Platform and Sink */
 bool intel_hdcp_capable(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        const struct intel_hdcp_shim *shim = connector->hdcp.shim;
        bool capable = false;
        u8 bksv[5];
@@ -74,9 +74,9 @@ bool intel_hdcp_capable(struct intel_connector *connector)
                return capable;
 
        if (shim->hdcp_capable) {
-               shim->hdcp_capable(intel_dig_port, &capable);
+               shim->hdcp_capable(dig_port, &capable);
        } else {
-               if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+               if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
                        capable = true;
        }
 
@@ -86,7 +86,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
 /* Is HDCP2.2 capable on Platform and Sink */
 bool intel_hdcp2_capable(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        bool capable = false;
@@ -104,7 +104,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
        mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
        /* Sink's capability for HDCP2.2 */
-       hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+       hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
 
        return capable;
 }
@@ -125,14 +125,14 @@ static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
               LINK_ENCRYPTION_STATUS;
 }
 
-static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
                                    const struct intel_hdcp_shim *shim)
 {
        int ret, read_ret;
        bool ksv_ready;
 
        /* Poll for ksv list ready (spec says max time allowed is 5s) */
-       ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+       ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
                                                         &ksv_ready),
                         read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
                         100 * 1000);
@@ -300,16 +300,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
                                const struct intel_hdcp_shim *shim,
                                u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        u32 vprime, sha_text, sha_leftovers, rep_ctl;
        int ret, i, j, sha_idx;
 
        /* Process V' values from the receiver */
        for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
-               ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+               ret = shim->read_v_prime_part(dig_port, i, &vprime);
                if (ret)
                        return ret;
                intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
@@ -528,20 +528,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 static
 int intel_hdcp_auth_downstream(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        const struct intel_hdcp_shim *shim = connector->hdcp.shim;
        u8 bstatus[2], num_downstream, *ksv_fifo;
        int ret, i, tries = 3;
 
-       ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+       ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "KSV list failed to become ready (%d)\n", ret);
                return ret;
        }
 
-       ret = shim->read_bstatus(intel_dig_port, bstatus);
+       ret = shim->read_bstatus(dig_port, bstatus);
        if (ret)
                return ret;
 
@@ -571,12 +571,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
                return -ENOMEM;
        }
 
-       ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+       ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
        if (ret)
                goto err;
 
        if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
-                                       num_downstream)) {
+                                       num_downstream) > 0) {
                drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
                ret = -EPERM;
                goto err;
@@ -611,12 +611,12 @@ err:
 /* Implements Part 1 of the HDCP authorization procedure */
 static int intel_hdcp_auth(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        const struct intel_hdcp_shim *shim = hdcp->shim;
        enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        unsigned long r0_prime_gen_start;
        int ret, i, tries = 2;
        union {
@@ -640,7 +640,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
         * displays, this is not necessary.
         */
        if (shim->hdcp_capable) {
-               ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+               ret = shim->hdcp_capable(dig_port, &hdcp_capable);
                if (ret)
                        return ret;
                if (!hdcp_capable) {
@@ -670,7 +670,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
                                  HDCP_ANLO(dev_priv, cpu_transcoder, port));
        an.reg[1] = intel_de_read(dev_priv,
                                  HDCP_ANHI(dev_priv, cpu_transcoder, port));
-       ret = shim->write_an_aksv(intel_dig_port, an.shim);
+       ret = shim->write_an_aksv(dig_port, an.shim);
        if (ret)
                return ret;
 
@@ -678,11 +678,11 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 
        memset(&bksv, 0, sizeof(bksv));
 
-       ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+       ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
        if (ret < 0)
                return ret;
 
-       if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+       if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
                drm_err(&dev_priv->drm, "BKSV is revoked\n");
                return -EPERM;
        }
@@ -692,14 +692,14 @@ static int intel_hdcp_auth(struct intel_connector *connector)
        intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
                       bksv.reg[1]);
 
-       ret = shim->repeater_present(intel_dig_port, &repeater_present);
+       ret = shim->repeater_present(dig_port, &repeater_present);
        if (ret)
                return ret;
        if (repeater_present)
                intel_de_write(dev_priv, HDCP_REP_CTL,
                               intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
 
-       ret = shim->toggle_signalling(intel_dig_port, true);
+       ret = shim->toggle_signalling(dig_port, true);
        if (ret)
                return ret;
 
@@ -732,7 +732,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
         */
        for (i = 0; i < tries; i++) {
                ri.reg = 0;
-               ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+               ret = shim->read_ri_prime(dig_port, ri.shim);
                if (ret)
                        return ret;
                intel_de_write(dev_priv,
@@ -776,10 +776,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 
 static int _intel_hdcp_disable(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
        int ret;
 
@@ -796,7 +796,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
                return -ETIMEDOUT;
        }
 
-       ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+       ret = hdcp->shim->toggle_signalling(dig_port, false);
        if (ret) {
                drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
                return ret;
@@ -859,10 +859,10 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
 /* Implements Part 3 of the HDCP authorization procedure */
 static int intel_hdcp_check_link(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder;
        int ret = 0;
 
@@ -888,7 +888,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
                goto out;
        }
 
-       if (hdcp->shim->check_link(intel_dig_port)) {
+       if (hdcp->shim->check_link(dig_port)) {
                if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                        hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
                        schedule_work(&hdcp->prop_work);
@@ -1242,7 +1242,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
 /* Authentication flow starts from here */
 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
@@ -1264,12 +1264,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
        if (ret < 0)
                return ret;
 
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+       ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
                                  sizeof(msgs.ake_init));
        if (ret < 0)
                return ret;
 
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+       ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
                                 &msgs.send_cert, sizeof(msgs.send_cert));
        if (ret < 0)
                return ret;
@@ -1283,7 +1283,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 
        if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
                                        msgs.send_cert.cert_rx.receiver_id,
-                                       1)) {
+                                       1) > 0) {
                drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
                return -EPERM;
        }
@@ -1298,11 +1298,11 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
        if (ret < 0)
                return ret;
 
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+       ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
        if (ret < 0)
                return ret;
 
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+       ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
                                 &msgs.send_hprime, sizeof(msgs.send_hprime));
        if (ret < 0)
                return ret;
@@ -1313,7 +1313,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 
        if (!hdcp->is_paired) {
                /* Pairing is required */
-               ret = shim->read_2_2_msg(intel_dig_port,
+               ret = shim->read_2_2_msg(dig_port,
                                         HDCP_2_2_AKE_SEND_PAIRING_INFO,
                                         &msgs.pairing_info,
                                         sizeof(msgs.pairing_info));
@@ -1331,7 +1331,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 
 static int hdcp2_locality_check(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
                struct hdcp2_lc_init lc_init;
@@ -1345,12 +1345,12 @@ static int hdcp2_locality_check(struct intel_connector *connector)
                if (ret < 0)
                        continue;
 
-               ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+               ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
                                      sizeof(msgs.lc_init));
                if (ret < 0)
                        continue;
 
-               ret = shim->read_2_2_msg(intel_dig_port,
+               ret = shim->read_2_2_msg(dig_port,
                                         HDCP_2_2_LC_SEND_LPRIME,
                                         &msgs.send_lprime,
                                         sizeof(msgs.send_lprime));
@@ -1367,7 +1367,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
 
 static int hdcp2_session_key_exchange(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct hdcp2_ske_send_eks send_eks;
        int ret;
@@ -1376,7 +1376,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
        if (ret < 0)
                return ret;
 
-       ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+       ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
                                        sizeof(send_eks));
        if (ret < 0)
                return ret;
@@ -1387,7 +1387,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
 static
 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
@@ -1409,12 +1409,12 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
        msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
 
        /* Send it to Repeater */
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+       ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
                                  sizeof(msgs.stream_manage));
        if (ret < 0)
                return ret;
 
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+       ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
                                 &msgs.stream_ready, sizeof(msgs.stream_ready));
        if (ret < 0)
                return ret;
@@ -1439,7 +1439,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 static
 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        union {
@@ -1451,7 +1451,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
        u8 *rx_info;
        int ret;
 
-       ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+       ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
                                 &msgs.recvid_list, sizeof(msgs.recvid_list));
        if (ret < 0)
                return ret;
@@ -1484,7 +1484,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
                      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
        if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
                                        msgs.recvid_list.receiver_ids,
-                                       device_cnt)) {
+                                       device_cnt) > 0) {
                drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
                return -EPERM;
        }
@@ -1496,7 +1496,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
                return ret;
 
        hdcp->seq_num_v = seq_num_v;
-       ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+       ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
                                  sizeof(msgs.rep_ack));
        if (ret < 0)
                return ret;
@@ -1517,7 +1517,7 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
 
 static int hdcp2_authenticate_sink(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        const struct intel_hdcp_shim *shim = hdcp->shim;
@@ -1543,7 +1543,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
        }
 
        if (shim->config_stream_type) {
-               ret = shim->config_stream_type(intel_dig_port,
+               ret = shim->config_stream_type(dig_port,
                                               hdcp->is_repeater,
                                               hdcp->content_type);
                if (ret < 0)
@@ -1569,10 +1569,10 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
 
 static int hdcp2_enable_encryption(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
        int ret;
 
@@ -1580,7 +1580,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
                    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
                    LINK_ENCRYPTION_STATUS);
        if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+               ret = hdcp->shim->toggle_signalling(dig_port, true);
                if (ret) {
                        drm_err(&dev_priv->drm,
                                "Failed to enable HDCP signalling. %d\n",
@@ -1608,10 +1608,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
 
 static int hdcp2_disable_encryption(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
        int ret;
 
@@ -1630,7 +1630,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
                drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
 
        if (hdcp->shim->toggle_signalling) {
-               ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+               ret = hdcp->shim->toggle_signalling(dig_port, false);
                if (ret) {
                        drm_err(&dev_priv->drm,
                                "Failed to disable HDCP signalling. %d\n",
@@ -1723,10 +1723,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
 /* Implements the Link Integrity Check for HDCP2.2 */
 static int intel_hdcp2_check_link(struct intel_connector *connector)
 {
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       enum port port = intel_dig_port->base.port;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder;
        int ret = 0;
 
@@ -1751,7 +1751,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                goto out;
        }
 
-       ret = hdcp->shim->check_2_2_link(intel_dig_port);
+       ret = hdcp->shim->check_2_2_link(dig_port);
        if (ret == HDCP_LINK_PROTECTED) {
                if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
                        hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
@@ -2086,6 +2086,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                (conn_state->hdcp_content_type != hdcp->content_type &&
                 conn_state->content_protection !=
                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+       bool desired_and_not_enabled = false;
 
        /*
         * During the HDCP encryption session if Type change is requested,
@@ -2108,8 +2109,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
        }
 
        if (conn_state->content_protection ==
-           DRM_MODE_CONTENT_PROTECTION_DESIRED ||
-           content_protection_type_changed)
+           DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               mutex_lock(&hdcp->mutex);
+               /* Avoid enabling hdcp, if it already ENABLED */
+               desired_and_not_enabled =
+                       hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               mutex_unlock(&hdcp->mutex);
+       }
+
+       if (desired_and_not_enabled || content_protection_type_changed)
                intel_hdcp_enable(connector,
                                  crtc_state->cpu_transcoder,
                                  (u8)conn_state->hdcp_content_type);
@@ -2158,6 +2166,19 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
                return;
        }
 
+       crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+                                                  new_state->crtc);
+       /*
+        * Fix the HDCP uapi content protection state in case of modeset.
+        * FIXME: As per HDCP content protection property uapi doc, an uevent()
+        * need to be sent if there is transition from ENABLED->DESIRED.
+        */
+       if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+           (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+           new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+               new_state->content_protection =
+                       DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
        /*
         * Nothing to do if the state didn't change, or HDCP was activated since
         * the last commit. And also no change in hdcp content type.
@@ -2170,8 +2191,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
                        return;
        }
 
-       crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
-                                                  new_state->crtc);
        crtc_state->mode_changed = true;
 }
 
index 864a164..de2ce56 100644 (file)
@@ -88,10 +88,10 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
 
 struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *intel_dig_port =
+       struct intel_digital_port *dig_port =
                container_of(&encoder->base, struct intel_digital_port,
                             base.base);
-       return &intel_dig_port->hdmi;
+       return &dig_port->hdmi;
 }
 
 static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
@@ -660,7 +660,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
                                  enum hdmi_infoframe_type type,
                                  const union hdmi_infoframe *frame)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        u8 buffer[VIDEO_DIP_DATA_SIZE];
        ssize_t len;
 
@@ -681,7 +681,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
        buffer[3] = 0;
        len++;
 
-       intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
+       dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
 }
 
 void intel_read_infoframe(struct intel_encoder *encoder,
@@ -689,7 +689,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
                          enum hdmi_infoframe_type type,
                          union hdmi_infoframe *frame)
 {
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        u8 buffer[VIDEO_DIP_DATA_SIZE];
        int ret;
 
@@ -697,7 +697,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
             intel_hdmi_infoframe_enable(type)) == 0)
                return;
 
-       intel_dig_port->read_infoframe(encoder, crtc_state,
+       dig_port->read_infoframe(encoder, crtc_state,
                                       type, buffer, sizeof(buffer));
 
        /* Fill the 'hole' (see big comment above) at position 3 */
@@ -872,8 +872,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
                               const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-       struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
        i915_reg_t reg = VIDEO_DIP_CTL;
        u32 val = intel_de_read(dev_priv, reg);
        u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1057,8 +1057,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-       struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = intel_de_read(dev_priv, reg);
        u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1275,11 +1275,11 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
                                         adapter, enable);
 }
 
-static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
                                unsigned int offset, void *buffer, size_t size)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_hdmi *hdmi = &dig_port->hdmi;
        struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
                                                              hdmi->ddc_bus);
        int ret;
@@ -1304,11 +1304,11 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
        return ret >= 0 ? -EIO : ret;
 }
 
-static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
                                 unsigned int offset, void *buffer, size_t size)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_hdmi *hdmi = &dig_port->hdmi;
        struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
                                                              hdmi->ddc_bus);
        int ret;
@@ -1338,16 +1338,16 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
                                  u8 *an)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_hdmi *hdmi = &dig_port->hdmi;
        struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
                                                              hdmi->ddc_bus);
        int ret;
 
-       ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+       ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
                                    DRM_HDCP_AN_LEN);
        if (ret) {
                drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
@@ -1363,13 +1363,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
        return 0;
 }
 
-static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
                                     u8 *bksv)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 
        int ret;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
                                   DRM_HDCP_KSV_LEN);
        if (ret)
                drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
@@ -1378,13 +1378,13 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
                                 u8 *bstatus)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 
        int ret;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
                                   bstatus, DRM_HDCP_BSTATUS_LEN);
        if (ret)
                drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
@@ -1393,14 +1393,14 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
                                     bool *repeater_present)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int ret;
        u8 val;
 
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
                drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
                            ret);
@@ -1411,13 +1411,13 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
                                  u8 *ri_prime)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 
        int ret;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
                                   ri_prime, DRM_HDCP_RI_LEN);
        if (ret)
                drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
@@ -1426,14 +1426,14 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
                                   bool *ksv_ready)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int ret;
        u8 val;
 
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
        if (ret) {
                drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
                            ret);
@@ -1444,12 +1444,12 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
                                  int num_downstream, u8 *ksv_fifo)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int ret;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
                                   ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
        if (ret) {
                drm_dbg_kms(&i915->drm,
@@ -1460,16 +1460,16 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
                                      int i, u32 *part)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int ret;
 
        if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
                return -EINVAL;
 
-       ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+       ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
                                   part, DRM_HDCP_V_PRIME_PART_LEN);
        if (ret)
                drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
@@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_crtc *crtc = connector->base.state->crtc;
        struct intel_crtc *intel_crtc = container_of(crtc,
                                                     struct intel_crtc, base);
@@ -1494,13 +1494,13 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
                usleep_range(25, 50);
        }
 
-       ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
        if (ret) {
                drm_err(&dev_priv->drm,
                        "Disable HDCP signalling failed (%d)\n", ret);
                return ret;
        }
-       ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
        if (ret) {
                drm_err(&dev_priv->drm,
                        "Enable HDCP signalling failed (%d)\n", ret);
@@ -1511,10 +1511,10 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 }
 
 static
-int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
                                      bool enable)
 {
-       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct intel_hdmi *hdmi = &dig_port->hdmi;
        struct intel_connector *connector = hdmi->attached_connector;
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        int ret;
@@ -1522,7 +1522,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
        if (!enable)
                usleep_range(6, 60); /* Bspec says >= 6us */
 
-       ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+       ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
        if (ret) {
                drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
                        enable ? "Enable" : "Disable", ret);
@@ -1540,12 +1540,12 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 }
 
 static
-bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_connector *connector =
-               intel_dig_port->hdmi.attached_connector;
-       enum port port = intel_dig_port->base.port;
+               dig_port->hdmi.attached_connector;
+       enum port port = dig_port->base.port;
        enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
        int ret;
        union {
@@ -1553,7 +1553,7 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
                u8 shim[DRM_HDCP_RI_LEN];
        } ri;
 
-       ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+       ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim);
        if (ret)
                return false;
 
@@ -1572,13 +1572,13 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
 }
 
 static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        int retry;
 
        for (retry = 0; retry < 3; retry++)
-               if (intel_hdmi_hdcp_check_link_once(intel_dig_port))
+               if (intel_hdmi_hdcp_check_link_once(dig_port))
                        return true;
 
        drm_err(&i915->drm, "Link check failed\n");
@@ -1599,10 +1599,10 @@ static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
 };
 
 static
-int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
                                    u8 *rx_status)
 {
-       return intel_hdmi_hdcp_read(intel_dig_port,
+       return intel_hdmi_hdcp_read(dig_port,
                                    HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
                                    rx_status,
                                    HDCP_2_2_HDMI_RXSTATUS_LEN);
@@ -1628,15 +1628,15 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
 }
 
 static int
-hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
                              u8 msg_id, bool *msg_ready,
                              ssize_t *msg_sz)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
        int ret;
 
-       ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+       ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
        if (ret < 0) {
                drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
                            ret);
@@ -1656,10 +1656,10 @@ hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
 }
 
 static ssize_t
-intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
                              u8 msg_id, bool paired)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        bool msg_ready = false;
        int timeout, ret;
        ssize_t msg_sz = 0;
@@ -1668,7 +1668,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
        if (timeout < 0)
                return timeout;
 
-       ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+       ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
                                                             msg_id, &msg_ready,
                                                             &msg_sz),
                         !ret && msg_ready && msg_sz, timeout * 1000,
@@ -1681,26 +1681,26 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port,
                               void *buf, size_t size)
 {
        unsigned int offset;
 
        offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
-       return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+       return intel_hdmi_hdcp_write(dig_port, offset, buf, size);
 }
 
 static
-int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
                              u8 msg_id, void *buf, size_t size)
 {
-       struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
-       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_hdmi *hdmi = &dig_port->hdmi;
        struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
        unsigned int offset;
        ssize_t ret;
 
-       ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+       ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id,
                                            hdcp->is_paired);
        if (ret < 0)
                return ret;
@@ -1717,7 +1717,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
        }
 
        offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+       ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
        if (ret)
                drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
                            msg_id, ret);
@@ -1726,12 +1726,12 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
 }
 
 static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
 {
        u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
        int ret;
 
-       ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+       ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
        if (ret)
                return ret;
 
@@ -1748,14 +1748,14 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
 }
 
 static
-int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port,
                             bool *capable)
 {
        u8 hdcp2_version;
        int ret;
 
        *capable = false;
-       ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+       ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
                                   &hdcp2_version, sizeof(hdcp2_version));
        if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
                *capable = true;
@@ -2063,7 +2063,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-       struct intel_digital_port *intel_dig_port =
+       struct intel_digital_port *dig_port =
                hdmi_to_dig_port(intel_hdmi);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        u32 temp;
@@ -2107,7 +2107,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
 
-       intel_dig_port->set_infoframes(encoder,
+       dig_port->set_infoframes(encoder,
                                       false,
                                       old_crtc_state, old_conn_state);
 
@@ -2242,8 +2242,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        if (clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               if (!has_hdmi_sink)
+                       return MODE_CLOCK_LOW;
                clock *= 2;
+       }
 
        if (drm_mode_is_420_only(&connector->display_info, mode))
                clock /= 2;
@@ -2428,8 +2431,8 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
        return 0;
 }
 
-static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
-                                          const struct drm_connector_state *conn_state)
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+                                   const struct drm_connector_state *conn_state)
 {
        const struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
@@ -2722,12 +2725,12 @@ static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
 {
-       struct intel_digital_port *intel_dig_port =
+       struct intel_digital_port *dig_port =
                enc_to_dig_port(encoder);
 
        intel_hdmi_prepare(encoder, pipe_config);
 
-       intel_dig_port->set_infoframes(encoder,
+       dig_port->set_infoframes(encoder,
                                       pipe_config->has_infoframe,
                                       pipe_config, conn_state);
 }
@@ -2737,7 +2740,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2746,13 +2749,13 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
        vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
                                 0x2b247878);
 
-       dport->set_infoframes(encoder,
+       dig_port->set_infoframes(encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
        g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
 
-       vlv_wait_port_ready(dev_priv, dport, 0x0);
+       vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 }
 
 static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -2813,7 +2816,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
                                const struct intel_crtc_state *pipe_config,
                                const struct drm_connector_state *conn_state)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(encoder);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -2823,13 +2826,13 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
        /* Use 800mV-0dB */
        chv_set_phy_signal_level(encoder, 128, 102, false);
 
-       dport->set_infoframes(encoder,
+       dig_port->set_infoframes(encoder,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
        g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
 
-       vlv_wait_port_ready(dev_priv, dport, 0x0);
+       vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 
        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
@@ -2880,19 +2883,13 @@ intel_hdmi_connector_register(struct drm_connector *connector)
        return ret;
 }
 
-static void intel_hdmi_destroy(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
 {
        struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
 
        cec_notifier_conn_unregister(n);
 
-       intel_connector_destroy(connector);
-}
-
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
-{
        intel_hdmi_remove_i2c_symlink(connector);
-
        intel_connector_unregister(connector);
 }
 
@@ -2904,7 +2901,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .atomic_set_property = intel_digital_connector_atomic_set_property,
        .late_register = intel_hdmi_connector_register,
        .early_unregister = intel_hdmi_connector_unregister,
-       .destroy = intel_hdmi_destroy,
+       .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
@@ -2923,7 +2920,7 @@ static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       struct intel_digital_port *intel_dig_port =
+       struct intel_digital_port *dig_port =
                                hdmi_to_dig_port(intel_hdmi);
 
        intel_attach_force_audio_property(connector);
@@ -2935,7 +2932,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
         * ToDo: This needs to be extended for LSPCON implementation
         * as well. Will be implemented separately.
         */
-       if (!intel_dig_port->lspcon.active)
+       if (!dig_port->lspcon.active)
                intel_attach_colorspace_property(connector);
 
        drm_connector_attach_content_type_property(connector);
@@ -3172,52 +3169,52 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
        return ddc_pin;
 }
 
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
+void intel_infoframe_init(struct intel_digital_port *dig_port)
 {
        struct drm_i915_private *dev_priv =
-               to_i915(intel_dig_port->base.base.dev);
+               to_i915(dig_port->base.base.dev);
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               intel_dig_port->write_infoframe = vlv_write_infoframe;
-               intel_dig_port->read_infoframe = vlv_read_infoframe;
-               intel_dig_port->set_infoframes = vlv_set_infoframes;
-               intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
+               dig_port->write_infoframe = vlv_write_infoframe;
+               dig_port->read_infoframe = vlv_read_infoframe;
+               dig_port->set_infoframes = vlv_set_infoframes;
+               dig_port->infoframes_enabled = vlv_infoframes_enabled;
        } else if (IS_G4X(dev_priv)) {
-               intel_dig_port->write_infoframe = g4x_write_infoframe;
-               intel_dig_port->read_infoframe = g4x_read_infoframe;
-               intel_dig_port->set_infoframes = g4x_set_infoframes;
-               intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
+               dig_port->write_infoframe = g4x_write_infoframe;
+               dig_port->read_infoframe = g4x_read_infoframe;
+               dig_port->set_infoframes = g4x_set_infoframes;
+               dig_port->infoframes_enabled = g4x_infoframes_enabled;
        } else if (HAS_DDI(dev_priv)) {
-               if (intel_dig_port->lspcon.active) {
-                       intel_dig_port->write_infoframe = lspcon_write_infoframe;
-                       intel_dig_port->read_infoframe = lspcon_read_infoframe;
-                       intel_dig_port->set_infoframes = lspcon_set_infoframes;
-                       intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
+               if (dig_port->lspcon.active) {
+                       dig_port->write_infoframe = lspcon_write_infoframe;
+                       dig_port->read_infoframe = lspcon_read_infoframe;
+                       dig_port->set_infoframes = lspcon_set_infoframes;
+                       dig_port->infoframes_enabled = lspcon_infoframes_enabled;
                } else {
-                       intel_dig_port->write_infoframe = hsw_write_infoframe;
-                       intel_dig_port->read_infoframe = hsw_read_infoframe;
-                       intel_dig_port->set_infoframes = hsw_set_infoframes;
-                       intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
+                       dig_port->write_infoframe = hsw_write_infoframe;
+                       dig_port->read_infoframe = hsw_read_infoframe;
+                       dig_port->set_infoframes = hsw_set_infoframes;
+                       dig_port->infoframes_enabled = hsw_infoframes_enabled;
                }
        } else if (HAS_PCH_IBX(dev_priv)) {
-               intel_dig_port->write_infoframe = ibx_write_infoframe;
-               intel_dig_port->read_infoframe = ibx_read_infoframe;
-               intel_dig_port->set_infoframes = ibx_set_infoframes;
-               intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
+               dig_port->write_infoframe = ibx_write_infoframe;
+               dig_port->read_infoframe = ibx_read_infoframe;
+               dig_port->set_infoframes = ibx_set_infoframes;
+               dig_port->infoframes_enabled = ibx_infoframes_enabled;
        } else {
-               intel_dig_port->write_infoframe = cpt_write_infoframe;
-               intel_dig_port->read_infoframe = cpt_read_infoframe;
-               intel_dig_port->set_infoframes = cpt_set_infoframes;
-               intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
+               dig_port->write_infoframe = cpt_write_infoframe;
+               dig_port->read_infoframe = cpt_read_infoframe;
+               dig_port->set_infoframes = cpt_set_infoframes;
+               dig_port->infoframes_enabled = cpt_infoframes_enabled;
        }
 }
 
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
                               struct intel_connector *intel_connector)
 {
        struct drm_connector *connector = &intel_connector->base;
-       struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
-       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+       struct intel_encoder *intel_encoder = &dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i2c_adapter *ddc;
@@ -3231,9 +3228,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
                return;
 
-       if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+       if (drm_WARN(dev, dig_port->max_lanes < 4,
                     "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
-                    intel_dig_port->max_lanes, intel_encoder->base.base.id,
+                    dig_port->max_lanes, intel_encoder->base.base.id,
                     intel_encoder->base.name))
                return;
 
@@ -3322,21 +3319,21 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
 void intel_hdmi_init(struct drm_i915_private *dev_priv,
                     i915_reg_t hdmi_reg, enum port port)
 {
-       struct intel_digital_port *intel_dig_port;
+       struct intel_digital_port *dig_port;
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
 
-       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
-       if (!intel_dig_port)
+       dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+       if (!dig_port)
                return;
 
        intel_connector = intel_connector_alloc();
        if (!intel_connector) {
-               kfree(intel_dig_port);
+               kfree(dig_port);
                return;
        }
 
-       intel_encoder = &intel_dig_port->base;
+       intel_encoder = &dig_port->base;
 
        drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                         &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
@@ -3393,12 +3390,12 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
        if (IS_G4X(dev_priv))
                intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
 
-       intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
-       intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
-       intel_dig_port->max_lanes = 4;
+       dig_port->hdmi.hdmi_reg = hdmi_reg;
+       dig_port->dp.output_reg = INVALID_MMIO_REG;
+       dig_port->max_lanes = 4;
 
-       intel_infoframe_init(intel_dig_port);
+       intel_infoframe_init(dig_port);
 
-       intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
-       intel_hdmi_init_connector(intel_dig_port, intel_connector);
+       dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+       intel_hdmi_init_connector(dig_port, intel_connector);
 }
index 8ff1f76..5b348dc 100644
@@ -25,7 +25,7 @@ enum port;
 
 void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
                     enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
                               struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -36,7 +36,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
                                       bool high_tmds_clock_ratio,
                                       bool scrambling);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+void intel_infoframe_init(struct intel_digital_port *dig_port);
 u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state);
 u32 intel_hdmi_infoframe_enable(unsigned int type);
@@ -46,5 +46,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
                          const struct intel_crtc_state *crtc_state,
                          enum hdmi_infoframe_type type,
                          union hdmi_infoframe *frame);
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+                                   const struct drm_connector_state *conn_state);
 
 #endif /* __INTEL_HDMI_H__ */
index 6ff7b22..b781bf4 100644
@@ -550,11 +550,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
        lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
 }
 
-bool lspcon_init(struct intel_digital_port *intel_dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
 {
-       struct intel_dp *dp = &intel_dig_port->dp;
-       struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_lspcon *lspcon = &dig_port->lspcon;
+       struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_connector *connector = &dp->attached_connector->base;
 
index 37cfddf..1cffe8a 100644
@@ -15,7 +15,7 @@ struct intel_digital_port;
 struct intel_encoder;
 struct intel_lspcon;
 
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
+bool lspcon_init(struct intel_digital_port *dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
 void lspcon_write_infoframe(struct intel_encoder *encoder,
index 611cb8d..bf9e320 100644
@@ -905,8 +905,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
                                    const struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = dev_priv->psr.dp;
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &dig_port->base;
        u32 val;
 
        drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
index 773523d..2da4388 100644
@@ -94,6 +94,8 @@ struct intel_sdvo {
         */
        struct intel_sdvo_caps caps;
 
+       u8 colorimetry_cap;
+
        /* Pixel clock limitations reported by the SDVO device, in kHz */
        int pixel_clock_min, pixel_clock_max;
 
@@ -942,6 +944,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
        return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
 
+static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo,
+                                            u8 pixel_repeat)
+{
+       return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI,
+                                   &pixel_repeat, 1);
+}
+
 static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
                                       u8 audio_state)
 {
@@ -1277,6 +1286,18 @@ static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
                READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
 }
 
+static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
+                                          const struct intel_crtc_state *crtc_state,
+                                          const struct drm_connector_state *conn_state)
+{
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+       if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0)
+               return false;
+
+       return intel_hdmi_limited_color_range(crtc_state, conn_state);
+}
+
 static int intel_sdvo_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
@@ -1342,21 +1363,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
                                intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
        }
 
-       if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
-               /*
-                * See CEA-861-E - 5.1 Default Encoding Parameters
-                *
-                * FIXME: This bit is only valid when using TMDS encoding and 8
-                * bit per color mode.
-                */
-               if (pipe_config->has_hdmi_sink &&
-                   drm_match_cea_mode(adjusted_mode) > 1)
-                       pipe_config->limited_color_range = true;
-       } else {
-               if (pipe_config->has_hdmi_sink &&
-                   intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
-                       pipe_config->limited_color_range = true;
-       }
+       pipe_config->limited_color_range =
+               intel_sdvo_limited_color_range(encoder, pipe_config,
+                                              conn_state);
 
        /* Clock computation needs to happen after pixel multiplier. */
        if (IS_TV(intel_sdvo_connector))
@@ -1495,8 +1504,13 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
        if (crtc_state->has_hdmi_sink) {
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
+                                          crtc_state->limited_color_range ?
+                                          SDVO_COLORIMETRY_RGB220 :
                                           SDVO_COLORIMETRY_RGB256);
                intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+               intel_sdvo_set_pixel_replication(intel_sdvo,
+                                                !!(adjusted_mode->flags &
+                                                   DRM_MODE_FLAG_DBLCLK));
        } else
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1530,8 +1544,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
                /* The real mode polarity is set by the SDVO commands, using
                 * struct intel_sdvo_dtd. */
                sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-               if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
-                       sdvox |= HDMI_COLOR_RANGE_16_235;
                if (INTEL_GEN(dev_priv) < 5)
                        sdvox |= SDVO_BORDER_ENABLE;
        } else {
@@ -1689,8 +1701,11 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
                 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 
-       if (sdvox & HDMI_COLOR_RANGE_16_235)
-               pipe_config->limited_color_range = true;
+       if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY,
+                                &val, 1)) {
+               if (val == SDVO_COLORIMETRY_RGB220)
+                       pipe_config->limited_color_range = true;
+       }
 
        if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
                                 &val, 1)) {
@@ -1850,17 +1865,26 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
        struct intel_sdvo_connector *intel_sdvo_connector =
                to_intel_sdvo_connector(connector);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+       bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
+       int clock = mode->clock;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
 
-       if (intel_sdvo->pixel_clock_min > mode->clock)
-               return MODE_CLOCK_LOW;
 
-       if (intel_sdvo->pixel_clock_max < mode->clock)
+       if (clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
-       if (mode->clock > max_dotclk)
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               if (!has_hdmi_sink)
+                       return MODE_CLOCK_LOW;
+               clock *= 2;
+       }
+
+       if (intel_sdvo->pixel_clock_min > clock)
+               return MODE_CLOCK_LOW;
+
+       if (intel_sdvo->pixel_clock_max < clock)
                return MODE_CLOCK_HIGH;
 
        if (IS_LVDS(intel_sdvo_connector)) {
@@ -1914,6 +1938,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
        return true;
 }
 
+static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
+{
+       u8 cap;
+
+       if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP,
+                                 &cap, sizeof(cap)))
+               return SDVO_COLORIMETRY_RGB256;
+
+       return cap;
+}
+
 static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
@@ -2100,8 +2135,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        return ret;
 }
 
-static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
+       int num_modes = 0;
        struct edid *edid;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2116,18 +2152,19 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
         * DDC fails, check to see if the analog output is disconnected, in
         * which case we'll look there for the digital DDC data.
         */
-       if (edid == NULL)
+       if (!edid)
                edid = intel_sdvo_get_analog_edid(connector);
 
-       if (edid != NULL) {
-               if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
-                                                     edid)) {
-                       drm_connector_update_edid_property(connector, edid);
-                       drm_add_edid_modes(connector, edid);
-               }
+       if (!edid)
+               return 0;
 
-               kfree(edid);
-       }
+       if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+                                             edid))
+               num_modes += intel_connector_update_modes(connector, edid);
+
+       kfree(edid);
+
+       return num_modes;
 }
 
 /*
@@ -2195,12 +2232,13 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
 
-static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
        const struct drm_connector_state *conn_state = connector->state;
        struct intel_sdvo_sdtv_resolution_request tv_res;
        u32 reply = 0, format_map = 0;
+       int num_modes = 0;
        int i;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2215,31 +2253,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
               min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
 
        if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
-               return;
+               return 0;
 
        BUILD_BUG_ON(sizeof(tv_res) != 3);
        if (!intel_sdvo_write_cmd(intel_sdvo,
                                  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
                                  &tv_res, sizeof(tv_res)))
-               return;
+               return 0;
        if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
-               return;
+               return 0;
 
-       for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+       for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
                if (reply & (1 << i)) {
                        struct drm_display_mode *nmode;
                        nmode = drm_mode_duplicate(connector->dev,
                                                   &sdvo_tv_modes[i]);
-                       if (nmode)
+                       if (nmode) {
                                drm_mode_probed_add(connector, nmode);
+                               num_modes++;
+                       }
                }
+       }
+
+       return num_modes;
 }
 
-static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct drm_display_mode *newmode;
+       int num_modes = 0;
 
        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.id, connector->name);
@@ -2256,6 +2300,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
                        newmode->type = (DRM_MODE_TYPE_PREFERRED |
                                         DRM_MODE_TYPE_DRIVER);
                        drm_mode_probed_add(connector, newmode);
+                       num_modes++;
                }
        }
 
@@ -2264,7 +2309,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
         * Assume that the preferred modes are
         * arranged in priority order.
         */
-       intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+       num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+       return num_modes;
 }
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2272,13 +2319,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
        if (IS_TV(intel_sdvo_connector))
-               intel_sdvo_get_tv_modes(connector);
+               return intel_sdvo_get_tv_modes(connector);
        else if (IS_LVDS(intel_sdvo_connector))
-               intel_sdvo_get_lvds_modes(connector);
+               return intel_sdvo_get_lvds_modes(connector);
        else
-               intel_sdvo_get_ddc_modes(connector);
-
-       return !list_empty(&connector->probed_modes);
+               return intel_sdvo_get_ddc_modes(connector);
 }
 
 static int
@@ -2669,12 +2714,9 @@ static void
 intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
                               struct intel_sdvo_connector *connector)
 {
-       struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
-
        intel_attach_force_audio_property(&connector->base.base);
-       if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
+       if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220)
                intel_attach_broadcast_rgb_property(&connector->base.base);
-       }
        intel_attach_aspect_ratio_property(&connector->base.base);
 }
 
@@ -3315,6 +3357,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                goto err;
 
+       intel_sdvo->colorimetry_cap =
+               intel_sdvo_get_colorimetry_cap(intel_sdvo);
+
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                drm_dbg_kms(&dev_priv->drm,
index 13b9a8e..74dc6c0 100644
@@ -705,10 +705,10 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_PIXEL_REPLI       0x8c
 #define SDVO_CMD_GET_COLORIMETRY_CAP   0x8d
 #define SDVO_CMD_SET_COLORIMETRY       0x8e
-  #define SDVO_COLORIMETRY_RGB256   0x0
-  #define SDVO_COLORIMETRY_RGB220   0x1
-  #define SDVO_COLORIMETRY_YCrCb422 0x3
-  #define SDVO_COLORIMETRY_YCrCb444 0x4
+  #define SDVO_COLORIMETRY_RGB256      (1 << 0)
+  #define SDVO_COLORIMETRY_RGB220      (1 << 1)
+  #define SDVO_COLORIMETRY_YCrCb422    (1 << 2)
+  #define SDVO_COLORIMETRY_YCrCb444    (1 << 3)
 #define SDVO_CMD_GET_COLORIMETRY       0x8f
 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
 #define SDVO_CMD_SET_AUDIO_STAT                0x91
index aef7fe9..6faabd4 100644
@@ -820,6 +820,7 @@ struct bdb_lfp_power {
        u16 adb;
        u16 lace_enabled_status;
        struct agressiveness_profile_entry aggressivenes[16];
+       u16 hobl; /* 232+ */
 } __packed;
 
 /*
index d145fe2..c5735c3 100644
@@ -1045,7 +1045,7 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
 
@@ -1055,9 +1055,9 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
        /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
        drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
 
-       intel_dig_port->write_infoframe(encoder, crtc_state,
-                                       DP_SDP_PPS, &dp_dsc_pps_sdp,
-                                       sizeof(dp_dsc_pps_sdp));
+       dig_port->write_infoframe(encoder, crtc_state,
+                                 DP_SDP_PPS, &dp_dsc_pps_sdp,
+                                 sizeof(dp_dsc_pps_sdp));
 }
 
 void intel_dsc_enable(struct intel_encoder *encoder,
index d3a86a4..278664f 100644
@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
        vma->pages = NULL;
 }
 
-static int vma_bind(struct i915_vma *vma,
+static int vma_bind(struct i915_address_space *vm,
+                   struct i915_vma *vma,
                    enum i915_cache_level cache_level,
                    u32 flags)
 {
-       return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+       return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
 }
 
-static void vma_unbind(struct i915_vma *vma)
+static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
 {
-       vma->vm->vma_ops.unbind_vma(vma);
+       vm->vma_ops.unbind_vma(vm, vma);
 }
 
 static const struct i915_vma_ops proxy_vma_ops = {
index 6675447..d0bdb6d 100644
@@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx)
        struct radix_tree_iter iter;
        void __rcu **slot;
 
-       lockdep_assert_held(&ctx->mutex);
-
+       mutex_lock(&ctx->lut_mutex);
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                struct i915_vma *vma = rcu_dereference_raw(*slot);
@@ -135,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx)
                i915_gem_object_put(obj);
        }
        rcu_read_unlock();
+       mutex_unlock(&ctx->lut_mutex);
 }
 
 static struct intel_context *
@@ -342,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        spin_unlock(&ctx->i915->gem.contexts.lock);
 
        mutex_destroy(&ctx->engines_mutex);
+       mutex_destroy(&ctx->lut_mutex);
 
        if (ctx->timeline)
                intel_timeline_put(ctx->timeline);
@@ -725,6 +726,7 @@ __create_context(struct drm_i915_private *i915)
        RCU_INIT_POINTER(ctx->engines, e);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+       mutex_init(&ctx->lut_mutex);
 
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
@@ -1312,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
        if (vm == rcu_access_pointer(ctx->vm))
                goto unlock;
 
+       old = __set_ppgtt(ctx, vm);
+
        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
        lut_close(ctx);
 
-       old = __set_ppgtt(ctx, vm);
-
        /*
         * We need to flush any requests using the current ppgtt before
         * we release it as the requests do not hold a reference themselves,
@@ -1330,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
        if (err) {
                i915_vm_close(__set_ppgtt(ctx, old));
                i915_vm_close(old);
+               lut_close(ctx); /* force a rebuild of the old obj:vma cache */
        }
 
 unlock:
@@ -1397,11 +1400,12 @@ static int get_ringsize(struct i915_gem_context *ctx,
 }
 
 int
-i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+i915_gem_user_to_context_sseu(struct intel_gt *gt,
                              const struct drm_i915_gem_context_param_sseu *user,
                              struct intel_sseu *context)
 {
-       const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+       const struct sseu_dev_info *device = &gt->info.sseu;
+       struct drm_i915_private *i915 = gt->i915;
 
        /* No zeros in any field. */
        if (!user->slice_mask || !user->subslice_mask ||
@@ -1534,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
                goto out_ce;
        }
 
-       ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
+       ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
        if (ret)
                goto out_ce;
 
index 3702b2f..a133f92 100644
@@ -225,7 +225,7 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
-int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+int i915_gem_user_to_context_sseu(struct intel_gt *gt,
                                  const struct drm_i915_gem_context_param_sseu *user,
                                  struct intel_sseu *context);
 
index 28760bd..ae14ca2 100644
@@ -170,6 +170,7 @@ struct i915_gem_context {
         * per vm, which may be one per context or shared with the global GTT)
         */
        struct radix_tree_root handles_vma;
+       struct mutex lut_mutex;
 
        /**
         * @name: arbitrary name, used for user debug
index b4862af..6b4ec66 100644
@@ -782,10 +782,15 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
 
        /* Check that the context hasn't been closed in the meantime */
        err = -EINTR;
-       if (!mutex_lock_interruptible(&ctx->mutex)) {
-               err = -ENOENT;
-               if (likely(!i915_gem_context_is_closed(ctx)))
+       if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
+               struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
+
+               if (unlikely(vm && vma->vm != vm))
+                       err = -EAGAIN; /* user racing with ctx set-vm */
+               else if (likely(!i915_gem_context_is_closed(ctx)))
                        err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+               else
+                       err = -ENOENT;
                if (err == 0) { /* And nor has this handle */
                        struct drm_i915_gem_object *obj = vma->obj;
 
@@ -798,7 +803,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
                        }
                        spin_unlock(&obj->lut_lock);
                }
-               mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->lut_mutex);
        }
        if (unlikely(err))
                goto err;
@@ -814,6 +819,8 @@ err:
 
 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 {
+       struct i915_address_space *vm = eb->context->vm;
+
        do {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
@@ -821,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 
                rcu_read_lock();
                vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
-               if (likely(vma))
+               if (likely(vma && vma->vm == vm))
                        vma = i915_vma_tryget(vma);
                rcu_read_unlock();
                if (likely(vma))
@@ -831,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
                if (unlikely(!obj))
                        return ERR_PTR(-ENOENT);
 
-               vma = i915_vma_instance(obj, eb->context->vm, NULL);
+               vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        i915_gem_object_put(obj);
                        return vma;
@@ -1973,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 
 static int num_vcs_engines(const struct drm_i915_private *i915)
 {
-       return hweight64(INTEL_INFO(i915)->engine_mask &
-                        GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+       return hweight64(VDBOX_MASK(&i915->gt));
 }
 
 /*
index fe27c5b..b233685 100644
@@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
  * mapping will then trigger a page fault on the next user access, allowing
  * fixup by vm_fault_gtt().
  */
-static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        intel_wakeref_t wakeref;
@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
        spin_unlock(&obj->mmo.lock);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
-       i915_gem_object_release_mmap_gtt(obj);
-       i915_gem_object_release_mmap_offset(obj);
-}
-
 static struct i915_mmap_offset *
 lookup_mmo(struct drm_i915_gem_object *obj,
           enum i915_mmap_type mmap_type)
index 862e01b..efee9e0 100644 (file)
@@ -25,7 +25,8 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
                              u32 handle, u64 *offset);
 
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
 
 #endif
index 6b69191..c8421fd 100644 (file)
@@ -143,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
                 * vma, in the same fd namespace, by virtue of flink/open.
                 */
 
-               mutex_lock(&ctx->mutex);
+               mutex_lock(&ctx->lut_mutex);
                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
                if (vma) {
                        GEM_BUG_ON(vma->obj != obj);
                        GEM_BUG_ON(!atomic_read(&vma->open_count));
                        i915_vma_close(vma);
                }
-               mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->lut_mutex);
 
                i915_gem_context_put(lut->ctx);
                i915_lut_handle_free(lut);
@@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
        atomic_dec(&i915->mm.free_count);
 }
 
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+       /* Skip serialisation and waking the device if known to be not used. */
+
+       if (obj->userfault_count)
+               i915_gem_object_release_mmap_gtt(obj);
+
+       if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+               struct i915_mmap_offset *mmo, *mn;
+
+               i915_gem_object_release_mmap_offset(obj);
+
+               rbtree_postorder_for_each_entry_safe(mmo, mn,
+                                                    &obj->mmo.offsets,
+                                                    offset) {
+                       drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+                                             &mmo->vma_node);
+                       kfree(mmo);
+               }
+               obj->mmo.offsets = RB_ROOT;
+       }
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
        struct drm_i915_gem_object *obj, *on;
 
        llist_for_each_entry_safe(obj, on, freed, freed) {
-               struct i915_mmap_offset *mmo, *mn;
-
                trace_i915_gem_object_destroy(obj);
 
                if (!list_empty(&obj->vma.list)) {
@@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                        spin_unlock(&obj->vma.lock);
                }
 
-               i915_gem_object_release_mmap(obj);
-
-               rbtree_postorder_for_each_entry_safe(mmo, mn,
-                                                    &obj->mmo.offsets,
-                                                    offset) {
-                       drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
-                                             &mmo->vma_node);
-                       kfree(mmo);
-               }
-               obj->mmo.offsets = RB_ROOT;
+               __i915_gem_object_free_mmaps(obj);
 
-               GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
                atomic_set(&obj->mm.pages_pin_count, 0);
index 2faa481..e5b9276 100644 (file)
@@ -258,10 +258,6 @@ struct page *
 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);
 
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-                              unsigned int n);
-
 dma_addr_t
 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
@@ -394,6 +390,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
        i915_gem_object_unpin_pages(obj);
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
+
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);
index af9e48e..7050519 100644 (file)
@@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
        }
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+       GEM_BUG_ON(!obj->mm.mapping);
+
+       /*
+        * We allow removing the mapping from underneath pinned pages!
+        *
+        * Furthermore, since this is an unsafe operation reserved only
+        * for construction time manipulation, we ignore locking prudence.
+        */
+       unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+       i915_gem_object_unpin_map(obj);
+}
+
 struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
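
The new __i915_gem_object_release_map() pairs with i915_gem_object_pin_map() for buffers that are written exactly once at construction time; later hunks in this series use it for the workaround batch and the default context image. A hedged sketch of the intended call sequence (write_once() and fill() are illustrative helpers, not part of the patch):

static int write_once(struct drm_i915_gem_object *obj,
                      void (*fill)(void *vaddr, size_t size),
                      size_t size)
{
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        fill(vaddr, size);

        /* Flush the CPU writes, then drop both the vmap and the pin. */
        __i915_gem_object_flush_map(obj, 0, size);
        __i915_gem_object_release_map(obj);

        return 0;
}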
@@ -533,20 +548,6 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
        return nth_page(sg_page(sg), offset);
 }
 
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-                              unsigned int n)
-{
-       struct page *page;
-
-       page = i915_gem_object_get_page(obj, n);
-       if (!obj->mm.dirty)
-               set_page_dirty(page);
-
-       return page;
-}
-
 dma_addr_t
 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
index 5b65ce7..dc8f052 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
 
+#include "gt/intel_gt_requests.h"
+
 #include "i915_trace.h"
 
 static bool swap_available(void)
@@ -111,15 +113,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
        unsigned long count = 0;
        unsigned long scanned = 0;
 
-       /*
-        * When shrinking the active list, we should also consider active
-        * contexts. Active contexts are pinned until they are retired, and
-        * so can not be simply unbound to retire and unpin their pages. To
-        * shrink the contexts, we must wait until the gpu is idle and
-        * completed its switch to the kernel context. In short, we do
-        * not have a good mechanism for idling a specific context.
-        */
-
        trace_i915_gem_shrink(i915, target, shrink);
 
        /*
@@ -134,6 +127,20 @@ i915_gem_shrink(struct drm_i915_private *i915,
        }
 
        /*
+        * When shrinking the active list, we should also consider active
+        * contexts. Active contexts are pinned until they are retired, and
+        * so can not be simply unbound to retire and unpin their pages. To
+        * shrink the contexts, we must wait until the gpu is idle and
+        * completed its switch to the kernel context. In short, we do
+        * not have a good mechanism for idling a specific context, but
+        * what we can do is give them a kick so that we do not keep idle
+        * contexts around longer than is necessary.
+        */
+       if (shrink & I915_SHRINK_ACTIVE)
+               /* Retire requests to unpin all idle contexts */
+               intel_gt_retire_requests(&i915->gt);
+
+       /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at the time, and recheck the list
@@ -408,26 +415,15 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
                                    struct mutex *mutex)
 {
-       bool unlock = false;
-
        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;
 
-       if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
-               mutex_acquire(&i915->drm.struct_mutex.dep_map,
-                             I915_MM_NORMAL, 0, _RET_IP_);
-               unlock = true;
-       }
-
        fs_reclaim_acquire(GFP_KERNEL);
 
        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, _RET_IP_);
 
        fs_reclaim_release(GFP_KERNEL);
-
-       if (unlock)
-               mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
 }
 
 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
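
With the struct_mutex juggling removed, i915_gem_shrinker_taints_mutex() is purely a lockdep priming helper: at init time it records that the given mutex may be taken under memory reclaim, so lockdep can flag inverted orderings long before they happen at runtime. A hedged usage sketch (my_state and its init function are illustrative):

struct my_state {
        struct mutex lock;      /* may be taken from the shrinker path */
};

static void my_state_init(struct drm_i915_private *i915, struct my_state *st)
{
        mutex_init(&st->lock);

        /* Teach lockdep about the reclaim -> st->lock dependency up front. */
        i915_gem_shrinker_taints_mutex(i915, &st->lock);
}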
index 0158e49..ff72ee2 100644 (file)
@@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
        i915_gem_object_unlock(obj);
 
        /* Force the fence to be reacquired for GTT access */
-       i915_gem_object_release_mmap(obj);
+       i915_gem_object_release_mmap_gtt(obj);
 
        /* Try to preallocate memory required to save swizzling on put-pages */
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
index b819788..7ffc3c7 100644 (file)
@@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
        int inst = 0;
        int ret = 0;
 
-       if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
+       if (INTEL_GEN(i915) < 9)
                return 0;
 
        if (flags & TEST_RESET)
@@ -1255,6 +1255,9 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
                if (hweight32(engine->sseu.slice_mask) < 2)
                        continue;
 
+               if (!engine->gt->info.sseu.has_slice_pg)
+                       continue;
+
                /*
                 * Gen11 VME friendly power-gated configuration with
                 * half enabled sub-slices.
index aa0d06c..51b5a34 100644 (file)
@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
        INIT_LIST_HEAD(&ctx->link);
        ctx->i915 = i915;
 
+       mutex_init(&ctx->mutex);
+
        spin_lock_init(&ctx->stale.lock);
        INIT_LIST_HEAD(&ctx->stale.engines);
 
@@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915,
        RCU_INIT_POINTER(ctx->engines, e);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-       mutex_init(&ctx->mutex);
+       mutex_init(&ctx->lut_mutex);
 
        if (name) {
                struct i915_ppgtt *ppgtt;
index 1de5fba..3a21cf6 100644 (file)
@@ -9,6 +9,7 @@
 #include "debugfs_engines.h"
 #include "debugfs_gt.h"
 #include "debugfs_gt_pm.h"
+#include "intel_sseu_debugfs.h"
 #include "uc/intel_uc_debugfs.h"
 #include "i915_drv.h"
 
@@ -25,6 +26,7 @@ void debugfs_gt_register(struct intel_gt *gt)
 
        debugfs_engines_register(gt, root);
        debugfs_gt_pm_register(gt, root);
+       intel_sseu_debugfs_register(gt, root);
 
        intel_uc_debugfs_register(&gt->uc, root);
 }
index f4fec7e..cdc0b9c 100644 (file)
@@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
        struct i915_page_directory * const pd = ppgtt->base.pd;
        struct i915_page_table *pt, *alloc = NULL;
-       intel_wakeref_t wakeref;
+       bool flush = false;
        u64 from = start;
        unsigned int pde;
        int ret = 0;
 
-       wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
        spin_lock(&pd->lock);
        gen6_for_each_pde(pt, pd, start, length, pde) {
                const unsigned int count = gen6_pte_count(start, length);
@@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                                alloc = pt;
                                pt = pd->entry[pde];
                        }
+
+                       flush = true;
                }
 
                atomic_add(count, &pt->used);
        }
        spin_unlock(&pd->lock);
 
-       if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
-               gen6_flush_pd(ppgtt, from, start);
+       if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+               intel_wakeref_t wakeref;
+
+               with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
+                       gen6_flush_pd(ppgtt, from, start);
+       }
 
        goto out;
 
@@ -230,7 +234,6 @@ unwind_out:
 out:
        if (alloc)
                free_px(vm, alloc);
-       intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
        return ret;
 }
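
The allocation path now takes a runtime-pm wakeref only when freshly allocated PDEs actually need to be flushed into a bound GGTT vma, using the scoped with_intel_runtime_pm() form so the device is awake just for the MMIO writes. A hedged sketch of that idiom (flush_hw() is an illustrative stand-in for gen6_flush_pd()):

static void flush_hw(struct i915_address_space *vm); /* illustrative stand-in */

static void flush_if_dirty(struct i915_address_space *vm, bool dirty)
{
        intel_wakeref_t wakeref;

        if (!dirty)
                return; /* nothing new was allocated, keep the device asleep */

        /* The wakeref is held only for the body of the statement. */
        with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
                flush_hw(vm);
}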
 
@@ -299,11 +302,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
        vma->pages = NULL;
 }
 
-static int pd_vma_bind(struct i915_vma *vma,
+static int pd_vma_bind(struct i915_address_space *vm,
+                      struct i915_vma *vma,
                       enum i915_cache_level cache_level,
                       u32 unused)
 {
-       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct gen6_ppgtt *ppgtt = vma->private;
        u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
 
@@ -314,7 +318,7 @@ static int pd_vma_bind(struct i915_vma *vma,
        return 0;
 }
 
-static void pd_vma_unbind(struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
 {
        struct gen6_ppgtt *ppgtt = vma->private;
        struct i915_page_directory * const pd = ppgtt->base.pd;
index de595b6..d93d85c 100644 (file)
@@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
        emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
 
        i915_gem_object_flush_map(vma->obj);
-       i915_gem_object_unpin_map(vma->obj);
+       __i915_gem_object_release_map(vma->obj);
 
        return 0;
 }
index d907d53..9178631 100644 (file)
@@ -314,13 +314,18 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
        lockdep_assert_held(&rq->lock);
 
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+               return true;
+
        if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
                struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
                struct intel_context *ce = rq->context;
                struct list_head *pos;
 
                spin_lock(&b->irq_lock);
-               GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
+               if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+                       goto unlock;
 
                if (!__intel_breadcrumbs_arm_irq(b))
                        goto unlock;
index e4aece2..52db2bd 100644 (file)
@@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring)
 {
        int err;
 
-       err = i915_active_acquire(&ring->vma->active);
+       err = intel_ring_pin(ring);
        if (err)
                return err;
 
-       err = intel_ring_pin(ring);
+       err = i915_active_acquire(&ring->vma->active);
        if (err)
-               goto err_active;
+               goto err_pin;
 
        return 0;
 
-err_active:
-       i915_active_release(&ring->vma->active);
+err_pin:
+       intel_ring_unpin(ring);
        return err;
 }
 
 static void __ring_retire(struct intel_ring *ring)
 {
-       intel_ring_unpin(ring);
        i915_active_release(&ring->vma->active);
+       intel_ring_unpin(ring);
 }
 
 __i915_active_call
index 27ae480..b9c8163 100644 (file)
@@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
-       *cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu);
+       *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
 
        intel_ring_advance(rq, cs);
 
index 7bf2f76..dd1a42c 100644 (file)
@@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
                 * instances.
                 */
                if ((INTEL_GEN(i915) >= 11 &&
-                    RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+                    engine->gt->info.vdbox_sfc_access & engine->mask) ||
                    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
                        engine->uabi_capabilities |=
                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@@ -450,6 +450,80 @@ void intel_engines_free(struct intel_gt *gt)
        }
 }
 
+/*
+ * Determine which engines are fused off in our particular hardware.
+ * Note that we have a catch-22 situation where we need to be able to access
+ * the blitter forcewake domain to read the engine fuses, but at the same time
+ * we need to know which engines are available on the system to know which
+ * forcewake domains are present. We solve this by initializing the forcewake
+ * domains based on the full engine mask in the platform capabilities before
+ * calling this function and pruning the domains for fused-off engines
+ * afterwards.
+ */
+static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_gt_info *info = &gt->info;
+       struct intel_uncore *uncore = gt->uncore;
+       unsigned int logical_vdbox = 0;
+       unsigned int i;
+       u32 media_fuse;
+       u16 vdbox_mask;
+       u16 vebox_mask;
+
+       info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
+
+       if (INTEL_GEN(i915) < 11)
+               return info->engine_mask;
+
+       media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+
+       vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+       vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+                     GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+       for (i = 0; i < I915_MAX_VCS; i++) {
+               if (!HAS_ENGINE(gt, _VCS(i))) {
+                       vdbox_mask &= ~BIT(i);
+                       continue;
+               }
+
+               if (!(BIT(i) & vdbox_mask)) {
+                       info->engine_mask &= ~BIT(_VCS(i));
+                       drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+                       continue;
+               }
+
+               /*
+                * In Gen11, only even numbered logical VDBOXes are
+                * hooked up to an SFC (Scaler & Format Converter) unit.
+                * In TGL each VDBOX has access to an SFC.
+                */
+               if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+                       gt->info.vdbox_sfc_access |= BIT(i);
+       }
+       drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+               vdbox_mask, VDBOX_MASK(gt));
+       GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+       for (i = 0; i < I915_MAX_VECS; i++) {
+               if (!HAS_ENGINE(gt, _VECS(i))) {
+                       vebox_mask &= ~BIT(i);
+                       continue;
+               }
+
+               if (!(BIT(i) & vebox_mask)) {
+                       info->engine_mask &= ~BIT(_VECS(i));
+                       drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+               }
+       }
+       drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+               vebox_mask, VEBOX_MASK(gt));
+       GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+
+       return info->engine_mask;
+}
+
 /**
  * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
  * @gt: pointer to struct intel_gt
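
After init_engine_mask(), engine presence is a per-gt runtime property derived from the fuses, so queries go through gt->info.engine_mask (via HAS_ENGINE(gt, ...)) rather than the static platform tables. A hedged sketch mirroring the num_vcs_engines() change earlier in this series (count_video_engines() is an illustrative name):

static unsigned int count_video_engines(const struct intel_gt *gt)
{
        unsigned int count = 0, i;

        for (i = 0; i < I915_MAX_VCS; i++)
                if (HAS_ENGINE(gt, _VCS(i)))    /* consults gt->info.engine_mask */
                        count++;

        return count;
}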
@@ -459,8 +533,7 @@ void intel_engines_free(struct intel_gt *gt)
 int intel_engines_init_mmio(struct intel_gt *gt)
 {
        struct drm_i915_private *i915 = gt->i915;
-       struct intel_device_info *device_info = mkwrite_device_info(i915);
-       const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
+       const unsigned int engine_mask = init_engine_mask(gt);
        unsigned int mask = 0;
        unsigned int i;
        int err;
@@ -473,7 +546,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
                return -ENODEV;
 
        for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
-               if (!HAS_ENGINE(i915, i))
+               if (!HAS_ENGINE(gt, i))
                        continue;
 
                err = intel_engine_setup(gt, i);
@@ -489,14 +562,16 @@ int intel_engines_init_mmio(struct intel_gt *gt)
         * engines.
         */
        if (drm_WARN_ON(&i915->drm, mask != engine_mask))
-               device_info->engine_mask = mask;
+               gt->info.engine_mask = mask;
 
-       RUNTIME_INFO(i915)->num_engines = hweight32(mask);
+       gt->info.num_engines = hweight32(mask);
 
        intel_gt_check_and_clear_faults(gt);
 
        intel_setup_engine_capabilities(gt);
 
+       intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
+
        return 0;
 
 cleanup:
@@ -634,7 +709,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 
        /* Use the whole device by default */
        engine->sseu =
-               intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+               intel_sseu_from_device_info(&engine->gt->info.sseu);
 
        intel_engine_init_workarounds(engine);
        intel_engine_init_whitelist(engine);
@@ -1000,7 +1075,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
                               struct intel_instdone *instdone)
 {
        struct drm_i915_private *i915 = engine->i915;
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
        struct intel_uncore *uncore = engine->uncore;
        u32 mmio_base = engine->mmio_base;
        int slice;
index d0a1078..8ec3eec 100644 (file)
@@ -142,6 +142,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
                return true;
 
        GEM_BUG_ON(!intel_context_is_barrier(ce));
+       GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
 
        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
index 490af81..8de92fd 100644 (file)
@@ -177,8 +177,12 @@ struct intel_engine_execlists {
         * the first error interrupt, record the EIR and schedule the tasklet.
         * In the tasklet, we process the pending CS events to ensure we have
         * the guilty request, and then reset the engine.
+        *
+        * Low 16b are used by HW, with the upper 16b used as the enabling mask.
+        * Reserve the upper 16b for tracking internal errors.
         */
        u32 error_interrupt;
+#define ERROR_CSB BIT(31)
 
        /**
         * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
index 848dece..34e6096 100644 (file)
@@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
                                     uabi_node);
                char old[sizeof(engine->name)];
 
-               if (intel_gt_has_init_error(engine->gt))
+               if (intel_gt_has_unrecoverable_error(engine->gt))
                        continue; /* ignore incomplete engines */
 
                GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
index 323c328..62979ea 100644 (file)
@@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
        intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
 }
 
-static int ggtt_bind_vma(struct i915_vma *vma,
+static int ggtt_bind_vma(struct i915_address_space *vm,
+                        struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags)
 {
@@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
-       vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+       vm->insert_entries(vm, vma, cache_level, pte_flags);
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 
        return 0;
 }
 
-static void ggtt_unbind_vma(struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
 {
-       vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+       vm->clear_range(vm, vma->node.start, vma->size);
 }
 
 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -567,7 +568,8 @@ err:
        return ret;
 }
 
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
+                                struct i915_vma *vma,
                                 enum i915_cache_level cache_level,
                                 u32 flags)
 {
@@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                pte_flags |= PTE_READ_ONLY;
 
        if (flags & I915_VMA_LOCAL_BIND) {
-               struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+               struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
 
-               if (flags & I915_VMA_ALLOC) {
-                       ret = alias->vm.allocate_va_range(&alias->vm,
-                                                         vma->node.start,
-                                                         vma->size);
-                       if (ret)
-                               return ret;
-
-                       set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
-               }
-
-               GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
-                                    __i915_vma_flags(vma)));
-               alias->vm.insert_entries(&alias->vm, vma,
-                                        cache_level, pte_flags);
+               ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
+               if (ret)
+                       return ret;
        }
 
        if (flags & I915_VMA_GLOBAL_BIND)
-               vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+               vm->insert_entries(vm, vma, cache_level, pte_flags);
 
        return 0;
 }
 
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
+                                   struct i915_vma *vma)
 {
-       if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
-               struct i915_address_space *vm = vma->vm;
-
+       if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                vm->clear_range(vm, vma->node.start, vma->size);
-       }
-
-       if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
-               struct i915_address_space *vm =
-                       &i915_vm_to_ggtt(vma->vm)->alias->vm;
 
-               vm->clear_range(vm, vma->node.start, vma->size);
-       }
+       if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
+               ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
 }
 
 static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
index ebc29b6..e0755f1 100644 (file)
@@ -44,6 +44,14 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
        gt->ggtt = ggtt;
 }
 
+int intel_gt_init_mmio(struct intel_gt *gt)
+{
+       intel_uc_init_mmio(&gt->uc);
+       intel_sseu_info_init(gt);
+
+       return intel_engines_init_mmio(gt);
+}
+
 static void init_unused_ring(struct intel_gt *gt, u32 base)
 {
        struct intel_uncore *uncore = gt->uncore;
@@ -510,7 +518,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
 
 static void __intel_gt_disable(struct intel_gt *gt)
 {
-       intel_gt_set_wedged_on_init(gt);
+       intel_gt_set_wedged_on_fini(gt);
 
        intel_gt_suspend_prepare(gt);
        intel_gt_suspend_late(gt);
@@ -642,3 +650,11 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
        intel_gt_fini_timelines(gt);
        intel_engines_free(gt);
 }
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+                        struct drm_printer *p)
+{
+       drm_printf(p, "available engines: %x\n", info->engine_mask);
+
+       intel_sseu_dump(&info->sseu, p);
+}
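
intel_gt_info_print() gives a single place to dump the per-gt topology. A hedged sketch of wiring it to a seq_file-backed debugfs entry (show_gt_info() is illustrative and assumes the gt pointer has been stashed in m->private):

static int show_gt_info(struct seq_file *m, void *unused)
{
        struct intel_gt *gt = m->private;
        struct drm_printer p = drm_seq_file_printer(m);

        intel_gt_info_print(&gt->info, &p);

        return 0;
}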
index 4fac043..9157c74 100644 (file)
@@ -11,6 +11,7 @@
 #include "intel_reset.h"
 
 struct drm_i915_private;
+struct drm_printer;
 
 #define GT_TRACE(gt, fmt, ...) do {                                    \
        const struct intel_gt *gt__ __maybe_unused = (gt);              \
@@ -35,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
 void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_init_mmio(struct intel_gt *gt);
 int __must_check intel_gt_init_hw(struct intel_gt *gt);
 int intel_gt_init(struct intel_gt *gt);
 void intel_gt_driver_register(struct intel_gt *gt);
@@ -58,14 +60,21 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
        return i915_ggtt_offset(gt->scratch) + field;
 }
 
-static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
 {
-       return __intel_reset_failed(&gt->reset);
+       return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
+              test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
 }
 
-static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
 {
-       return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+       GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
+                  !test_bit(I915_WEDGED, &gt->reset.flags));
+
+       return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
 }
 
+void intel_gt_info_print(const struct intel_gt_info *info,
+                        struct drm_printer *p);
+
 #endif /* __INTEL_GT_H__ */
index 0cc7dd5..b05da68 100644 (file)
@@ -27,7 +27,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
        if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
                u32 eir;
 
-               eir = ENGINE_READ(engine, RING_EIR);
+               /* Upper 16b are the enabling mask, rsvd for internal errors */
+               eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
                ENGINE_TRACE(engine, "CS error: %x\n", eir);
 
                /* Disable the error interrupt until after the reset */
@@ -457,7 +458,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
                 * RPS interrupts will get enabled/disabled on demand when RPS
                 * itself is enabled/disabled.
                 */
-               if (HAS_ENGINE(gt->i915, VECS0)) {
+               if (HAS_ENGINE(gt, VECS0)) {
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
                        gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
                }
index f1d5333..274aa0d 100644 (file)
@@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
        enum intel_engine_id id;
        int err;
 
-       err = intel_gt_has_init_error(gt);
+       err = intel_gt_has_unrecoverable_error(gt);
        if (err)
                return err;
 
index 16ff47c..66fcbf9 100644 (file)
@@ -31,12 +31,15 @@ static bool engine_active(const struct intel_engine_cs *engine)
        return !list_empty(&engine->kernel_context->timeline->requests);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt, long timeout)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool active = false;
 
+       if (!timeout)
+               return false;
+
        if (!intel_gt_pm_is_awake(gt))
                return false;
 
@@ -139,7 +142,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;
 
-       flush_submission(gt); /* kick the ksoftirqd tasklets */
+       flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex)) {
@@ -194,7 +197,7 @@ out_active: spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
-       if (flush_submission(gt)) /* Wait, there's more! */
+       if (flush_submission(gt, timeout)) /* Wait, there's more! */
                active_count++;
 
        return active_count ? timeout : 0;
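
flush_submission() now receives the caller's timeout, so a non-blocking retire (timeout == 0) no longer kicks the submission tasklets or wakes anything up. Roughly, the two calling modes look like this (drain_gt() is an illustrative name; intel_gt_retire_requests() is the non-blocking wrapper already used elsewhere in this patch):

static void background_retire(struct intel_gt *gt)
{
        /* Opportunistic: reap completed requests, never wait or kick. */
        intel_gt_retire_requests(gt);
}

static long drain_gt(struct intel_gt *gt)
{
        /* Blocking: flush submission and wait up to a second for requests. */
        return intel_gt_retire_requests_timeout(gt, HZ);
}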
index 0cc1d6b..6d39a4a 100644 (file)
@@ -109,6 +109,17 @@ struct intel_gt {
        struct intel_gt_buffer_pool buffer_pool;
 
        struct i915_vma *scratch;
+
+       struct intel_gt_info {
+               intel_engine_mask_t engine_mask;
+               u8 num_engines;
+
+               /* Media engine access to SFC per instance */
+               u8 vdbox_sfc_access;
+
+               /* Slice/subslice/EU info */
+               struct sseu_dev_info sseu;
+       } info;
 };
 
 enum intel_gt_scratch_field {
index d93ebdf..f2b7507 100644 (file)
@@ -198,14 +198,16 @@ struct intel_gt;
 
 struct i915_vma_ops {
        /* Map an object into an address space with the given cache flags. */
-       int (*bind_vma)(struct i915_vma *vma,
+       int (*bind_vma)(struct i915_address_space *vm,
+                       struct i915_vma *vma,
                        enum i915_cache_level cache_level,
                        u32 flags);
        /*
         * Unmap an object from an address space. This usually consists of
         * setting the valid PTE entries to a reserved scratch page.
         */
-       void (*unbind_vma)(struct i915_vma *vma);
+       void (*unbind_vma)(struct i915_address_space *vm,
+                          struct i915_vma *vma);
 
        int (*set_pages)(struct i915_vma *vma);
        void (*clear_pages)(struct i915_vma *vma);
@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
 int ppgtt_set_pages(struct i915_vma *vma);
 void clear_pages(struct i915_vma *vma);
 
+int ppgtt_bind_vma(struct i915_address_space *vm,
+                  struct i915_vma *vma,
+                  enum i915_cache_level cache_level,
+                  u32 flags);
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+                     struct i915_vma *vma);
+
 void gtt_write_workarounds(struct intel_gt *gt);
 
 void setup_private_pat(struct intel_uncore *uncore);
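
The bind/unbind callbacks now take the target address space explicitly, which is what lets the aliasing-GGTT code above route a local bind into the alias ppGTT without touching vma->vm. A hedged sketch of an implementation following the new i915_vma_ops shape (my_bind_vma()/my_unbind_vma() are illustrative; the real GGTT and ppGTT versions are in this patch):

static int my_bind_vma(struct i915_address_space *vm,
                       struct i915_vma *vma,
                       enum i915_cache_level cache_level,
                       u32 flags)
{
        u32 pte_flags = 0;

        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        vm->insert_entries(vm, vma, cache_level, pte_flags); /* write PTEs into vm */

        return 0;
}

static void my_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
        vm->clear_range(vm, vma->node.start, vma->size); /* point back at scratch */
}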
index e866b8d..e0280a6 100644 (file)
@@ -2569,6 +2569,25 @@ static void process_csb(struct intel_engine_cs *engine)
                return;
 
        /*
+        * We will consume all events from HW, or at least pretend to.
+        *
+        * The sequence of events from the HW is deterministic, and derived
+        * from our writes to the ELSP, with a smidgen of variability for
+        * the arrival of the asynchronous requests wrt the inflight
+        * execution. If the HW sends an event that does not correspond with
+        * the one we are expecting, we have to abandon all hope as we lose
+        * all tracking of what the engine is actually executing. We will
+        * only detect we are out of sequence with the HW when we get an
+        * 'impossible' event because we have already drained our own
+        * preemption/promotion queue. If this occurs, we know that we likely
+        * lost track of execution earlier and must unwind and restart, the
+        * lost track of execution earlier and must unwind and restart; the
+        * simplest way is to stop processing the event queue and force the
+        * engine to reset.
+       execlists->csb_head = tail;
+       ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+       /*
         * Hopefully paired with a wmb() in HW!
         *
         * We must complete the read of the write pointer before any reads
@@ -2577,8 +2596,6 @@ static void process_csb(struct intel_engine_cs *engine)
         * we perform the READ_ONCE(*csb_write).
         */
        rmb();
-
-       ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
        do {
                bool promote;
 
@@ -2613,6 +2630,11 @@ static void process_csb(struct intel_engine_cs *engine)
                if (promote) {
                        struct i915_request * const *old = execlists->active;
 
+                       if (GEM_WARN_ON(!*execlists->pending)) {
+                               execlists->error_interrupt |= ERROR_CSB;
+                               break;
+                       }
+
                        ring_set_paused(engine, 0);
 
                        /* Point active to the new ELSP; prevent overwriting */
@@ -2635,7 +2657,10 @@ static void process_csb(struct intel_engine_cs *engine)
 
                        WRITE_ONCE(execlists->pending[0], NULL);
                } else {
-                       GEM_BUG_ON(!*execlists->active);
+                       if (GEM_WARN_ON(!*execlists->active)) {
+                               execlists->error_interrupt |= ERROR_CSB;
+                               break;
+                       }
 
                        /* port0 completed, advanced to port1 */
                        trace_ports(execlists, "completed", execlists->active);
@@ -2686,7 +2711,6 @@ static void process_csb(struct intel_engine_cs *engine)
                }
        } while (head != tail);
 
-       execlists->csb_head = head;
        set_timeslice(engine);
 
        /*
@@ -3005,12 +3029,12 @@ static u32 active_ccid(struct intel_engine_cs *engine)
        return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
 }
 
-static bool execlists_capture(struct intel_engine_cs *engine)
+static void execlists_capture(struct intel_engine_cs *engine)
 {
        struct execlists_capture *cap;
 
        if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
-               return true;
+               return;
 
        /*
         * We need to _quickly_ capture the engine state before we reset.
@@ -3019,7 +3043,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
         */
        cap = capture_regs(engine);
        if (!cap)
-               return true;
+               return;
 
        spin_lock_irq(&engine->active.lock);
        cap->rq = active_context(engine, active_ccid(engine));
@@ -3056,14 +3080,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
 
        INIT_WORK(&cap->work, execlists_capture_work);
        schedule_work(&cap->work);
-       return true;
+       return;
 
 err_rq:
        i915_request_put(cap->rq);
 err_free:
        i915_gpu_coredump_put(cap->error);
        kfree(cap);
-       return false;
 }
 
 static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
@@ -3083,10 +3106,8 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
        tasklet_disable_nosync(&engine->execlists.tasklet);
 
        ring_set_paused(engine, 1); /* Freeze the current request in place */
-       if (execlists_capture(engine))
-               intel_engine_reset(engine, msg);
-       else
-               ring_set_paused(engine, 0);
+       execlists_capture(engine);
+       intel_engine_reset(engine, msg);
 
        tasklet_enable(&engine->execlists.tasklet);
        clear_and_wake_up_bit(bit, lock);
@@ -3117,9 +3138,18 @@ static void execlists_submission_tasklet(unsigned long data)
        process_csb(engine);
 
        if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+               const char *msg;
+
+               /* Generate the error message in priority wrt to the user! */
+               if (engine->execlists.error_interrupt & GENMASK(15, 0))
+                       msg = "CS error"; /* thrown by a user payload */
+               else if (engine->execlists.error_interrupt & ERROR_CSB)
+                       msg = "invalid CSB event";
+               else
+                       msg = "internal error";
+
                engine->execlists.error_interrupt = 0;
-               if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
-                       execlists_reset(engine, "CS error");
+               execlists_reset(engine, msg);
        }
 
        if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
@@ -3422,7 +3452,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
        /* RPCS */
        if (engine->class == RENDER_CLASS) {
                regs[CTX_R_PWR_CLK_STATE] =
-                       intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+                       intel_sseu_make_rpcs(engine->gt, &ce->sseu);
 
                i915_oa_init_reg_state(ce, engine);
        }
@@ -3880,7 +3910,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
                                            &wa_ctx->per_ctx };
        wa_bb_func_t wa_bb_fn[2];
-       struct page *page;
        void *batch, *batch_ptr;
        unsigned int i;
        int ret;
@@ -3916,14 +3945,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                return ret;
        }
 
-       page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
-       batch = batch_ptr = kmap_atomic(page);
+       batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
 
        /*
         * Emit the two workaround batch buffers, recording the offset from the
         * start of the workaround batch buffer object for each and their
         * respective sizes.
         */
+       batch_ptr = batch;
        for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
                wa_bb[i]->offset = batch_ptr - batch;
                if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3935,10 +3964,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                        batch_ptr = wa_bb_fn[i](engine, batch_ptr);
                wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
        }
+       GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
 
-       BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
-       kunmap_atomic(batch);
+       __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+       __i915_gem_object_release_map(wa_ctx->vma->obj);
        if (ret)
                lrc_destroy_wa_ctx(engine);
 
@@ -5403,13 +5432,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
         * typically be the first we inspect for submission.
         */
        swp = prandom_u32_max(ve->num_siblings);
-       if (!swp)
-               return;
-
-       swap(ve->siblings[swp], ve->siblings[0]);
-       if (!intel_engine_has_relative_mmio(ve->siblings[0]))
-               virtual_update_register_offsets(ve->context.lrc_reg_state,
-                                               ve->siblings[0]);
+       if (swp)
+               swap(ve->siblings[swp], ve->siblings[0]);
 }
 
 static int virtual_context_alloc(struct intel_context *ce)
@@ -5422,15 +5446,9 @@ static int virtual_context_alloc(struct intel_context *ce)
 static int virtual_context_pin(struct intel_context *ce)
 {
        struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-       int err;
 
        /* Note: we must use a real engine class for setting up reg state */
-       err = __execlists_context_pin(ce, ve->siblings[0]);
-       if (err)
-               return err;
-
-       virtual_engine_initial_hint(ve);
-       return 0;
+       return __execlists_context_pin(ce, ve->siblings[0]);
 }
 
 static void virtual_context_enter(struct intel_context *ce)
@@ -5695,6 +5713,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
        intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
        intel_engine_init_breadcrumbs(&ve->base);
        intel_engine_init_execlists(&ve->base);
+       ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */
 
        ve->base.cops = &virtual_context_ops;
        ve->base.request_alloc = execlists_request_alloc;
@@ -5776,6 +5795,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
        ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
+       virtual_engine_initial_hint(ve);
        return &ve->context;
 
 err_put:
index f86f7e6..f0862e9 100644 (file)
@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
        return ppgtt;
 }
 
-static int ppgtt_bind_vma(struct i915_vma *vma,
-                         enum i915_cache_level cache_level,
-                         u32 flags)
+int ppgtt_bind_vma(struct i915_address_space *vm,
+                  struct i915_vma *vma,
+                  enum i915_cache_level cache_level,
+                  u32 flags)
 {
        u32 pte_flags;
        int err;
 
-       if (flags & I915_VMA_ALLOC) {
-               err = vma->vm->allocate_va_range(vma->vm,
-                                                vma->node.start, vma->size);
+       if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+               err = vm->allocate_va_range(vm, vma->node.start, vma->size);
                if (err)
                        return err;
 
@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
-       GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
-       vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+       vm->insert_entries(vm, vma, cache_level, pte_flags);
        wmb();
 
        return 0;
 }
 
-static void ppgtt_unbind_vma(struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
 {
        if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
-               vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+               vm->clear_range(vm, vma->node.start, vma->size);
 }
 
 int ppgtt_set_pages(struct i915_vma *vma)
index 6db2338..1bfad58 100644 (file)
@@ -150,7 +150,7 @@ static int render_state_setup(struct intel_renderstate *so,
        ret = 0;
 out:
        __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
-       i915_gem_object_unpin_map(so->vma->obj);
+       __i915_gem_object_release_map(so->vma->obj);
        return ret;
 }
 
index 0156f1f..46a5cef 100644 (file)
@@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
 static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
 {
        struct intel_uncore *uncore = engine->uncore;
-       u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+       u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
        i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
        u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
        i915_reg_t sfc_usage;
@@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
 {
        struct intel_uncore *uncore = engine->uncore;
-       u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+       u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
        i915_reg_t sfc_forced_lock;
        u32 sfc_forced_lock_bit;
 
@@ -880,7 +880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                return true;
 
        /* Never fully initialised, recovery impossible */
-       if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
+       if (intel_gt_has_unrecoverable_error(gt))
                return false;
 
        GT_TRACE(gt, "start\n");
@@ -930,7 +930,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                 * Warn CI about the unrecoverable wedged condition.
                 * Time for a reboot.
                 */
-               add_taint_for_CI(TAINT_WARN);
+               add_taint_for_CI(gt->i915, TAINT_WARN);
                return false;
        }
 
@@ -1097,7 +1097,7 @@ taint:
         * rather than continue on into oblivion. For everyone else,
         * the system should still plod along, but they have been warned!
         */
-       add_taint_for_CI(TAINT_WARN);
+       add_taint_for_CI(gt->i915, TAINT_WARN);
 error:
        __intel_gt_set_wedged(gt);
        goto finish;
@@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
         */
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
-       engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
+       engine_mask &= gt->info.engine_mask;
 
        if (flags & I915_ERROR_CAPTURE) {
                i915_capture_error_state(gt->i915);
@@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
        if (!intel_gt_is_wedged(gt))
                return 0;
 
-       if (intel_gt_has_init_error(gt))
+       if (intel_gt_has_unrecoverable_error(gt))
                return -EIO;
 
        /* Reset still in progress? Maybe we will recover? */
@@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
                     I915_WEDGED_ON_INIT);
        intel_gt_set_wedged(gt);
        set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+
+       /* Wedged on init is non-recoverable */
+       add_taint_for_CI(gt->i915, TAINT_WARN);
+}
+
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
+{
+       intel_gt_set_wedged(gt);
+       set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
 }
 
 void intel_gt_init_reset(struct intel_gt *gt)
index 8e8d5f7..a0eec7c 100644 (file)
@@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
 /*
  * There's no unset_wedged_on_init paired with this one.
  * Once we're wedged on init, there's no going back.
+ * Same thing for unset_wedged_on_fini.
  */
 void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
 
 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
 
@@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
             (W)->gt;                                                   \
             __intel_fini_wedge((W)))
 
-static inline bool __intel_reset_failed(const struct intel_reset *reset)
-{
-       GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
-                  !test_bit(I915_WEDGED, &reset->flags) : false);
-
-       return unlikely(test_bit(I915_WEDGED, &reset->flags));
-}
-
 bool intel_has_gpu_reset(const struct intel_gt *gt);
 bool intel_has_reset_engine(const struct intel_gt *gt);
 
index f43bc3a..add6b86 100644 (file)
@@ -34,12 +34,17 @@ struct intel_reset {
         * longer use the GPU - similar to #I915_WEDGED bit. The difference is
         * in the way we're handling "forced" unwedged (e.g. through debugfs),
         * which is not allowed in case we failed to initialize.
+        *
+        * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
+        * use it to mark that the GPU is no longer available (and prevent
+        * users from using it).
         */
        unsigned long flags;
 #define I915_RESET_BACKOFF     0
 #define I915_RESET_MODESET     1
 #define I915_RESET_ENGINE      2
-#define I915_WEDGED_ON_INIT    (BITS_PER_LONG - 2)
+#define I915_WEDGED_ON_INIT    (BITS_PER_LONG - 3)
+#define I915_WEDGED_ON_FINI    (BITS_PER_LONG - 2)
 #define I915_WEDGED            (BITS_PER_LONG - 1)
 
        struct mutex mutex; /* serialises wedging/unwedging */
index 68a0848..94915f6 100644 (file)
@@ -543,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                           vaddr, engine->context_size);
 
                i915_gem_object_flush_map(obj);
-               i915_gem_object_unpin_map(obj);
+               __i915_gem_object_release_map(obj);
        }
 
        vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
@@ -649,7 +649,7 @@ static inline int mi_set_context(struct i915_request *rq,
        struct drm_i915_private *i915 = engine->i915;
        enum intel_engine_id id;
        const int num_engines =
-               IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+               IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
        bool force_restore = false;
        int len;
        u32 *cs;
index 296391d..97ba14a 100644 (file)
@@ -1062,11 +1062,12 @@ static bool gen6_rps_enable(struct intel_rps *rps)
 static int chv_rps_max_freq(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
+       struct intel_gt *gt = rps_to_gt(rps);
        u32 val;
 
        val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+       switch (gt->info.sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
index d173271..f1c039e 100644 (file)
@@ -60,10 +60,552 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
        return hweight32(intel_sseu_get_subslices(sseu, slice));
 }
 
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+                      int subslice)
+{
+       int slice_stride = sseu->max_subslices * sseu->eu_stride;
+
+       return slice * slice_stride + subslice * sseu->eu_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+                       int subslice)
+{
+       int i, offset = sseu_eu_idx(sseu, slice, subslice);
+       u16 eu_mask = 0;
+
+       for (i = 0; i < sseu->eu_stride; i++)
+               eu_mask |=
+                       ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+
+       return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+                        u16 eu_mask)
+{
+       int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+       for (i = 0; i < sseu->eu_stride; i++)
+               sseu->eu_mask[offset + i] =
+                       (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+       u16 i, total = 0;
+
+       for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+               total += hweight8(sseu->eu_mask[i]);
+
+       return total;
+}
+
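
To make the strided indexing above concrete: with the gen12 parameters set below (1 slice, 6 subslices, 16 EUs per subslice), eu_stride is 2 bytes and the per-slice stride is 6 * 2 = 12 bytes, so the mask for subslice 3 of slice 0 starts at byte 0 * 12 + 3 * 2 = 6 and sseu_get_eus() reassembles it as eu_mask[6] | (eu_mask[7] << 8). (Editorial worked example, using values taken from this patch.)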
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+                                   u8 s_en, u32 ss_en, u16 eu_en)
+{
+       int s, ss;
+
+       /* ss_en represents entire subslice mask across all slices */
+       GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+                  sizeof(ss_en) * BITS_PER_BYTE);
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               if ((s_en & BIT(s)) == 0)
+                       continue;
+
+               sseu->slice_mask |= BIT(s);
+
+               intel_sseu_set_subslices(sseu, s, ss_en);
+
+               for (ss = 0; ss < sseu->max_subslices; ss++)
+                       if (intel_sseu_has_subslice(sseu, s, ss))
+                               sseu_set_eus(sseu, s, ss, eu_en);
+       }
+       sseu->eu_per_subslice = hweight16(eu_en);
+       sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct intel_gt *gt)
+{
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       struct intel_uncore *uncore = gt->uncore;
+       u32 dss_en;
+       u16 eu_en = 0;
+       u8 eu_en_fuse;
+       u8 s_en;
+       int eu;
+
+       /*
+        * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+        * Instead of splitting these, provide userspace with an array
+        * of DSS to more closely represent the hardware resource.
+        */
+       intel_sseu_set_info(sseu, 1, 6, 16);
+
+       s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+               GEN11_GT_S_ENA_MASK;
+
+       dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+
+       /* one bit per pair of EUs */
+       eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+                      GEN11_EU_DIS_MASK);
+       for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+               if (eu_en_fuse & BIT(eu))
+                       eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+       gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+       /* TGL only supports slice-level power gating */
+       sseu->has_slice_pg = 1;
+}
+
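
As a worked example of the pair-wise EU fuse decode in gen12_sseu_info_init() above: the hardware reports one enable bit per pair of EUs, so with 16 EUs per dual-subslice only the low 8 fuse bits matter, and a fuse value of 0b00000101 (pairs 0 and 2 enabled) expands to eu_en = 0b00110011, i.e. EUs 0, 1, 4 and 5. (Editorial example; the numbers follow directly from the loop above.)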
+static void gen11_sseu_info_init(struct intel_gt *gt)
+{
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       struct intel_uncore *uncore = gt->uncore;
+       u32 ss_en;
+       u8 eu_en;
+       u8 s_en;
+
+       if (IS_ELKHARTLAKE(gt->i915))
+               intel_sseu_set_info(sseu, 1, 4, 8);
+       else
+               intel_sseu_set_info(sseu, 1, 8, 8);
+
+       s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+               GEN11_GT_S_ENA_MASK;
+       ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
+
+       eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+                 GEN11_EU_DIS_MASK);
+
+       gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+
+       /* ICL has no power gating restrictions. */
+       sseu->has_slice_pg = 1;
+       sseu->has_subslice_pg = 1;
+       sseu->has_eu_pg = 1;
+}
+
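+/* CNL: derive the topology from GEN8_FUSE2 and the EU disable registers. */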
+static void gen10_sseu_info_init(struct intel_gt *gt)
+{
+       struct intel_uncore *uncore = gt->uncore;
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+       const int eu_mask = 0xff;
+       u32 subslice_mask, eu_en;
+       int s, ss;
+
+       intel_sseu_set_info(sseu, 6, 4, 8);
+
+       sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+               GEN10_F2_S_ENA_SHIFT;
+
+       /* Slice0 */
+       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+       for (ss = 0; ss < sseu->max_subslices; ss++)
+               sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+       /* Slice1 */
+       sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+       sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+       /* Slice2 */
+       sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+       sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+       /* Slice3 */
+       sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+       sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+       /* Slice4 */
+       sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+       sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+       /* Slice5 */
+       sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+       eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
+       sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+       subslice_mask = (1 << 4) - 1;
+       subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+                          GEN10_F2_SS_DIS_SHIFT);
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               u32 subslice_mask_with_eus = subslice_mask;
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       if (sseu_get_eus(sseu, s, ss) == 0)
+                               subslice_mask_with_eus &= ~BIT(ss);
+               }
+
+               /*
+                * Slice0 can have up to 3 subslices, but there are only 2 in
+                * slice1/2.
+                */
+               intel_sseu_set_subslices(sseu, s, s == 0 ?
+                                        subslice_mask_with_eus :
+                                        subslice_mask_with_eus & 0x3);
+       }
+
+       sseu->eu_total = compute_eu_total(sseu);
+
+       /*
+        * CNL is expected to always have a uniform distribution
+        * of EU across subslices with the exception that any one
+        * EU in any one subslice may be fused off for die
+        * recovery.
+        */
+       sseu->eu_per_subslice =
+               intel_sseu_subslice_total(sseu) ?
+               DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+               0;
+
+       /* No restrictions on Power Gating */
+       sseu->has_slice_pg = 1;
+       sseu->has_subslice_pg = 1;
+       sseu->has_eu_pg = 1;
+}
+
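+/* CHV: a single slice with up to two subslices; EU fuses live in CHV_FUSE_GT. */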
+static void cherryview_sseu_info_init(struct intel_gt *gt)
+{
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       u32 fuse;
+       u8 subslice_mask = 0;
+
+       fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
+
+       sseu->slice_mask = BIT(0);
+       intel_sseu_set_info(sseu, 1, 2, 8);
+
+       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+               u8 disabled_mask =
+                       ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+                        CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+                       (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+                         CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+               subslice_mask |= BIT(0);
+               sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+       }
+
+       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+               u8 disabled_mask =
+                       ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+                        CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+                       (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+                         CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+               subslice_mask |= BIT(1);
+               sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+       }
+
+       intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
+       sseu->eu_total = compute_eu_total(sseu);
+
+       /*
+        * CHV expected to always have a uniform distribution of EU
+        * across subslices.
+        */
+       sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+               sseu->eu_total /
+               intel_sseu_subslice_total(sseu) :
+               0;
+       /*
+        * CHV supports subslice power gating on devices with more than
+        * one subslice, and supports EU power gating on devices with
+        * more than one EU pair per subslice.
+        */
+       sseu->has_slice_pg = 0;
+       sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
+}
+
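+/*
+ * SKL/BXT: derive the topology from GEN8_FUSE2 and the per-slice
+ * GEN9_EU_DISABLE registers.
+ */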
+static void gen9_sseu_info_init(struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_device_info *info = mkwrite_device_info(i915);
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       struct intel_uncore *uncore = gt->uncore;
+       u32 fuse2, eu_disable, subslice_mask;
+       const u8 eu_mask = 0xff;
+       int s, ss;
+
+       fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+
+       /* BXT has a single slice and at most 3 subslices. */
+       intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
+                           IS_GEN9_LP(i915) ? 3 : 4, 8);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       subslice_mask = (1 << sseu->max_subslices) - 1;
+       subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                          GEN9_F2_SS_DIS_SHIFT);
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < sseu->max_slices; s++) {
+               if (!(sseu->slice_mask & BIT(s)))
+                       /* skip disabled slice */
+                       continue;
+
+               intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+               eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       int eu_per_ss;
+                       u8 eu_disabled_mask;
+
+                       if (!intel_sseu_has_subslice(sseu, s, ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+                       sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+                       eu_per_ss = sseu->max_eus_per_subslice -
+                               hweight8(eu_disabled_mask);
+
+                       /*
+                        * Record which subslice(s) has(have) 7 EUs. We
+                        * can tune the hash used to spread work among
+                        * subslices if they are unbalanced.
+                        */
+                       if (eu_per_ss == 7)
+                               sseu->subslice_7eu[s] |= BIT(ss);
+               }
+       }
+
+       sseu->eu_total = compute_eu_total(sseu);
+
+       /*
+        * SKL is expected to always have a uniform distribution
+        * of EU across subslices with the exception that any one
+        * EU in any one subslice may be fused off for die
+        * recovery. BXT is expected to be perfectly uniform in EU
+        * distribution.
+        */
+       sseu->eu_per_subslice =
+               intel_sseu_subslice_total(sseu) ?
+               DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+               0;
+
+       /*
+        * SKL+ supports slice power gating on devices with more than
+        * one slice, and supports EU power gating on devices with
+        * more than one EU pair per subslice. BXT+ supports subslice
+        * power gating on devices with more than one subslice, and
+        * supports EU power gating on devices with more than one EU
+        * pair per subslice.
+        */
+       sseu->has_slice_pg =
+               !IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg =
+               IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = sseu->eu_per_subslice > 2;
+
+       if (IS_GEN9_LP(i915)) {
+#define IS_SS_DISABLED(ss)     (!(sseu->subslice_mask[0] & BIT(ss)))
+               info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+
+               sseu->min_eu_in_pool = 0;
+               if (info->has_pooled_eu) {
+                       if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+                               sseu->min_eu_in_pool = 3;
+                       else if (IS_SS_DISABLED(1))
+                               sseu->min_eu_in_pool = 6;
+                       else
+                               sseu->min_eu_in_pool = 9;
+               }
+#undef IS_SS_DISABLED
+       }
+}
+
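+/*
+ * BDW: the EU disable bits for the three slices are spread across the
+ * three GEN8_EU_DISABLE registers.
+ */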
+static void bdw_sseu_info_init(struct intel_gt *gt)
+{
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       struct intel_uncore *uncore = gt->uncore;
+       int s, ss;
+       u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
+       u32 eu_disable0, eu_disable1, eu_disable2;
+
+       fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       intel_sseu_set_info(sseu, 3, 3, 8);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+       subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+                          GEN8_F2_SS_DIS_SHIFT);
+       eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+       eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+       eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+       eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
+               ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
+                (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
+               ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
+                (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < sseu->max_slices; s++) {
+               if (!(sseu->slice_mask & BIT(s)))
+                       /* skip disabled slice */
+                       continue;
+
+               intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       u8 eu_disabled_mask;
+                       u32 n_disabled;
+
+                       if (!intel_sseu_has_subslice(sseu, s, ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_disabled_mask =
+                               eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+                       sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+                       n_disabled = hweight8(eu_disabled_mask);
+
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (sseu->max_eus_per_subslice - n_disabled == 7)
+                               sseu->subslice_7eu[s] |= 1 << ss;
+               }
+       }
+
+       sseu->eu_total = compute_eu_total(sseu);
+
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       sseu->eu_per_subslice =
+               intel_sseu_subslice_total(sseu) ?
+               DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+               0;
+
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg = 0;
+       sseu->has_eu_pg = 0;
+}
+
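+/*
+ * HSW: the slice/subslice counts are inferred from the GT level (PCI id),
+ * and the number of EUs per subslice from the fuse register.
+ */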
+static void hsw_sseu_info_init(struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct sseu_dev_info *sseu = &gt->info.sseu;
+       u32 fuse1;
+       u8 subslice_mask = 0;
+       int s, ss;
+
+       /*
+        * There isn't a register to tell us how many slices/subslices. We
+        * work off the PCI-ids here.
+        */
+       switch (INTEL_INFO(i915)->gt) {
+       default:
+               MISSING_CASE(INTEL_INFO(i915)->gt);
+               fallthrough;
+       case 1:
+               sseu->slice_mask = BIT(0);
+               subslice_mask = BIT(0);
+               break;
+       case 2:
+               sseu->slice_mask = BIT(0);
+               subslice_mask = BIT(0) | BIT(1);
+               break;
+       case 3:
+               sseu->slice_mask = BIT(0) | BIT(1);
+               subslice_mask = BIT(0) | BIT(1);
+               break;
+       }
+
+       fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+       switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+       default:
+               MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+                            HSW_F1_EU_DIS_SHIFT);
+               fallthrough;
+       case HSW_F1_EU_DIS_10EUS:
+               sseu->eu_per_subslice = 10;
+               break;
+       case HSW_F1_EU_DIS_8EUS:
+               sseu->eu_per_subslice = 8;
+               break;
+       case HSW_F1_EU_DIS_6EUS:
+               sseu->eu_per_subslice = 6;
+               break;
+       }
+
+       intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+                           hweight8(subslice_mask),
+                           sseu->eu_per_subslice);
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       sseu_set_eus(sseu, s, ss,
+                                    (1UL << sseu->eu_per_subslice) - 1);
+               }
+       }
+
+       sseu->eu_total = compute_eu_total(sseu);
+
+       /* No powergating for you. */
+       sseu->has_slice_pg = 0;
+       sseu->has_subslice_pg = 0;
+       sseu->has_eu_pg = 0;
+}
+
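+/* Dispatch to the platform-specific SSEU probing routine. */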
+void intel_sseu_info_init(struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+
+       if (IS_HASWELL(i915))
+               hsw_sseu_info_init(gt);
+       else if (IS_CHERRYVIEW(i915))
+               cherryview_sseu_info_init(gt);
+       else if (IS_BROADWELL(i915))
+               bdw_sseu_info_init(gt);
+       else if (IS_GEN(i915, 9))
+               gen9_sseu_info_init(gt);
+       else if (IS_GEN(i915, 10))
+               gen10_sseu_info_init(gt);
+       else if (IS_GEN(i915, 11))
+               gen11_sseu_info_init(gt);
+       else if (INTEL_GEN(i915) >= 12)
+               gen12_sseu_info_init(gt);
+}
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
                         const struct intel_sseu *req_sseu)
 {
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       struct drm_i915_private *i915 = gt->i915;
+       const struct sseu_dev_info *sseu = &gt->info.sseu;
        bool subslice_pg = sseu->has_subslice_pg;
        u8 slices, subslices;
        u32 rpcs = 0;
@@ -173,3 +715,48 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 
        return rpcs;
 }
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+       int s;
+
+       drm_printf(p, "slice total: %u, mask=%04x\n",
+                  hweight8(sseu->slice_mask), sseu->slice_mask);
+       drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
+       for (s = 0; s < sseu->max_slices; s++) {
+               drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+                          s, intel_sseu_subslices_per_slice(sseu, s),
+                          intel_sseu_get_subslices(sseu, s));
+       }
+       drm_printf(p, "EU total: %u\n", sseu->eu_total);
+       drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+       drm_printf(p, "has slice power gating: %s\n",
+                  yesno(sseu->has_slice_pg));
+       drm_printf(p, "has subslice power gating: %s\n",
+                  yesno(sseu->has_subslice_pg));
+       drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+                              struct drm_printer *p)
+{
+       int s, ss;
+
+       if (sseu->max_slices == 0) {
+               drm_printf(p, "Unavailable\n");
+               return;
+       }
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+                          s, intel_sseu_subslices_per_slice(sseu, s),
+                          intel_sseu_get_subslices(sseu, s));
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+                       drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+                                  ss, hweight16(enabled_eus), enabled_eus);
+               }
+       }
+}
index d1d2252..23ba6c2 100644 (file)
@@ -13,6 +13,8 @@
 #include "i915_gem.h"
 
 struct drm_i915_private;
+struct intel_gt;
+struct drm_printer;
 
 #define GEN_MAX_SLICES         (6) /* CNL upper bound */
 #define GEN_MAX_SUBSLICES      (8) /* ICL upper bound */
@@ -94,7 +96,13 @@ u32  intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
 void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
                              u32 ss_mask);
 
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+void intel_sseu_info_init(struct intel_gt *gt);
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
                         const struct intel_sseu *req_sseu);
 
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+                              struct drm_printer *p);
+
 #endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
new file mode 100644 (file)
index 0000000..5178028
--- /dev/null
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "debugfs_gt.h"
+#include "intel_sseu_debugfs.h"
+#include "i915_drv.h"
+
+static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
+                               int slice, u8 *to_mask)
+{
+       int offset = slice * sseu->ss_stride;
+
+       memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
+
+static void cherryview_sseu_device_status(struct intel_gt *gt,
+                                         struct sseu_dev_info *sseu)
+{
+#define SS_MAX 2
+       struct intel_uncore *uncore = gt->uncore;
+       const int ss_max = SS_MAX;
+       u32 sig1[SS_MAX], sig2[SS_MAX];
+       int ss;
+
+       sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
+       sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
+       sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
+       sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);
+
+       for (ss = 0; ss < ss_max; ss++) {
+               unsigned int eu_cnt;
+
+               if (sig1[ss] & CHV_SS_PG_ENABLE)
+                       /* skip disabled subslice */
+                       continue;
+
+               sseu->slice_mask = BIT(0);
+               sseu->subslice_mask[0] |= BIT(ss);
+               eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+                        ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+                        ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+                        ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+               sseu->eu_total += eu_cnt;
+               sseu->eu_per_subslice = max_t(unsigned int,
+                                             sseu->eu_per_subslice, eu_cnt);
+       }
+#undef SS_MAX
+}
+
+static void gen10_sseu_device_status(struct intel_gt *gt,
+                                    struct sseu_dev_info *sseu)
+{
+#define SS_MAX 6
+       struct intel_uncore *uncore = gt->uncore;
+       const struct intel_gt_info *info = &gt->info;
+       u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+       int s, ss;
+
+       for (s = 0; s < info->sseu.max_slices; s++) {
+               /*
+                * FIXME: Valid SS Mask respects the spec and reads
+                * only valid bits for those registers, excluding reserved,
+                * although this seems wrong because it would leave many
+                * subslices without ACK.
+                */
+               s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
+                       GEN10_PGCTL_VALID_SS_MASK(s);
+               eu_reg[2 * s] = intel_uncore_read(uncore,
+                                                 GEN10_SS01_EU_PGCTL_ACK(s));
+               eu_reg[2 * s + 1] = intel_uncore_read(uncore,
+                                                     GEN10_SS23_EU_PGCTL_ACK(s));
+       }
+
+       eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+                    GEN9_PGCTL_SSA_EU19_ACK |
+                    GEN9_PGCTL_SSA_EU210_ACK |
+                    GEN9_PGCTL_SSA_EU311_ACK;
+       eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+                    GEN9_PGCTL_SSB_EU19_ACK |
+                    GEN9_PGCTL_SSB_EU210_ACK |
+                    GEN9_PGCTL_SSB_EU311_ACK;
+
+       for (s = 0; s < info->sseu.max_slices; s++) {
+               if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+                       /* skip disabled slice */
+                       continue;
+
+               sseu->slice_mask |= BIT(s);
+               sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+
+               for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+                       unsigned int eu_cnt;
+
+                       if (info->sseu.has_subslice_pg &&
+                           !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+                                              eu_mask[ss % 2]);
+                       sseu->eu_total += eu_cnt;
+                       sseu->eu_per_subslice = max_t(unsigned int,
+                                                     sseu->eu_per_subslice,
+                                                     eu_cnt);
+               }
+       }
+#undef SS_MAX
+}
+
+static void gen9_sseu_device_status(struct intel_gt *gt,
+                                   struct sseu_dev_info *sseu)
+{
+#define SS_MAX 3
+       struct intel_uncore *uncore = gt->uncore;
+       const struct intel_gt_info *info = &gt->info;
+       u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+       int s, ss;
+
+       for (s = 0; s < info->sseu.max_slices; s++) {
+               s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
+               eu_reg[2 * s] =
+                       intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
+               eu_reg[2 * s + 1] =
+                       intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
+       }
+
+       eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+                    GEN9_PGCTL_SSA_EU19_ACK |
+                    GEN9_PGCTL_SSA_EU210_ACK |
+                    GEN9_PGCTL_SSA_EU311_ACK;
+       eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+                    GEN9_PGCTL_SSB_EU19_ACK |
+                    GEN9_PGCTL_SSB_EU210_ACK |
+                    GEN9_PGCTL_SSB_EU311_ACK;
+
+       for (s = 0; s < info->sseu.max_slices; s++) {
+               if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+                       /* skip disabled slice */
+                       continue;
+
+               sseu->slice_mask |= BIT(s);
+
+               if (IS_GEN9_BC(gt->i915))
+                       sseu_copy_subslices(&info->sseu, s,
+                                           sseu->subslice_mask);
+
+               for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+                       unsigned int eu_cnt;
+                       u8 ss_idx = s * info->sseu.ss_stride +
+                                   ss / BITS_PER_BYTE;
+
+                       if (IS_GEN9_LP(gt->i915)) {
+                               if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+                                       /* skip disabled subslice */
+                                       continue;
+
+                               sseu->subslice_mask[ss_idx] |=
+                                       BIT(ss % BITS_PER_BYTE);
+                       }
+
+                       eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
+                       eu_cnt = 2 * hweight32(eu_cnt);
+
+                       sseu->eu_total += eu_cnt;
+                       sseu->eu_per_subslice = max_t(unsigned int,
+                                                     sseu->eu_per_subslice,
+                                                     eu_cnt);
+               }
+       }
+#undef SS_MAX
+}
+
+static void bdw_sseu_device_status(struct intel_gt *gt,
+                                  struct sseu_dev_info *sseu)
+{
+       const struct intel_gt_info *info = &gt->info;
+       u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
+       int s;
+
+       sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
+
+       if (sseu->slice_mask) {
+               sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+               for (s = 0; s < fls(sseu->slice_mask); s++)
+                       sseu_copy_subslices(&info->sseu, s,
+                                           sseu->subslice_mask);
+               sseu->eu_total = sseu->eu_per_subslice *
+                                intel_sseu_subslice_total(sseu);
+
+               /* subtract fused off EU(s) from enabled slice(s) */
+               for (s = 0; s < fls(sseu->slice_mask); s++) {
+                       u8 subslice_7eu = info->sseu.subslice_7eu[s];
+
+                       sseu->eu_total -= hweight8(subslice_7eu);
+               }
+       }
+}
+
+static void i915_print_sseu_info(struct seq_file *m,
+                                bool is_available_info,
+                                bool has_pooled_eu,
+                                const struct sseu_dev_info *sseu)
+{
+       const char *type = is_available_info ? "Available" : "Enabled";
+       int s;
+
+       seq_printf(m, "  %s Slice Mask: %04x\n", type,
+                  sseu->slice_mask);
+       seq_printf(m, "  %s Slice Total: %u\n", type,
+                  hweight8(sseu->slice_mask));
+       seq_printf(m, "  %s Subslice Total: %u\n", type,
+                  intel_sseu_subslice_total(sseu));
+       for (s = 0; s < fls(sseu->slice_mask); s++) {
+               seq_printf(m, "  %s Slice%i subslices: %u\n", type,
+                          s, intel_sseu_subslices_per_slice(sseu, s));
+       }
+       seq_printf(m, "  %s EU Total: %u\n", type,
+                  sseu->eu_total);
+       seq_printf(m, "  %s EU Per Subslice: %u\n", type,
+                  sseu->eu_per_subslice);
+
+       if (!is_available_info)
+               return;
+
+       seq_printf(m, "  Has Pooled EU: %s\n", yesno(has_pooled_eu));
+       if (has_pooled_eu)
+               seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
+
+       seq_printf(m, "  Has Slice Power Gating: %s\n",
+                  yesno(sseu->has_slice_pg));
+       seq_printf(m, "  Has Subslice Power Gating: %s\n",
+                  yesno(sseu->has_subslice_pg));
+       seq_printf(m, "  Has EU Power Gating: %s\n",
+                  yesno(sseu->has_eu_pg));
+}
+
+/*
+ * This is called from top-level debugfs as well, so we can't get the gt from
+ * the seq_file.
+ */
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       const struct intel_gt_info *info = &gt->info;
+       struct sseu_dev_info sseu;
+       intel_wakeref_t wakeref;
+
+       if (INTEL_GEN(i915) < 8)
+               return -ENODEV;
+
+       seq_puts(m, "SSEU Device Info\n");
+       i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
+
+       seq_puts(m, "SSEU Device Status\n");
+       memset(&sseu, 0, sizeof(sseu));
+       intel_sseu_set_info(&sseu, info->sseu.max_slices,
+                           info->sseu.max_subslices,
+                           info->sseu.max_eus_per_subslice);
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               if (IS_CHERRYVIEW(i915))
+                       cherryview_sseu_device_status(gt, &sseu);
+               else if (IS_BROADWELL(i915))
+                       bdw_sseu_device_status(gt, &sseu);
+               else if (IS_GEN(i915, 9))
+                       gen9_sseu_device_status(gt, &sseu);
+               else if (INTEL_GEN(i915) >= 10)
+                       gen10_sseu_device_status(gt, &sseu);
+       }
+
+       i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
+
+       return 0;
+}
+
+static int sseu_status_show(struct seq_file *m, void *unused)
+{
+       struct intel_gt *gt = m->private;
+
+       return intel_sseu_status(m, gt);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+
+static int rcs_topology_show(struct seq_file *m, void *unused)
+{
+       struct intel_gt *gt = m->private;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       intel_sseu_print_topology(&gt->info.sseu, &p);
+
+       return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+       static const struct debugfs_gt_file files[] = {
+               { "sseu_status", &sseu_status_fops, NULL },
+               { "rcs_topology", &rcs_topology_fops, NULL },
+       };
+
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
new file mode 100644 (file)
index 0000000..73f0015
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef INTEL_SSEU_DEBUGFS_H
+#define INTEL_SSEU_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct seq_file;
+
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt);
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_SSEU_DEBUGFS_H */
index 4546284..46d20f5 100644 (file)
@@ -73,6 +73,8 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
                        return vma;
                }
 
+               GT_TRACE(timeline->gt, "new HWSP allocated\n");
+
                vma->private = hwsp;
                hwsp->gt = timeline->gt;
                hwsp->vma = vma;
@@ -327,6 +329,8 @@ int intel_timeline_pin(struct intel_timeline *tl)
        tl->hwsp_offset =
                i915_ggtt_offset(tl->hwsp_ggtt) +
                offset_in_page(tl->hwsp_offset);
+       GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+                tl->fence_context, tl->hwsp_offset);
 
        cacheline_acquire(tl->hwsp_cacheline);
        if (atomic_fetch_inc(&tl->pin_count)) {
@@ -434,6 +438,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
        int err;
 
        might_lock(&tl->gt->ggtt->vm.mutex);
+       GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
 
        /*
         * If there is an outstanding GPU reference to this cacheline,
@@ -497,6 +502,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
                memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
 
        tl->hwsp_offset += i915_ggtt_offset(vma);
+       GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+                tl->fence_context, tl->hwsp_offset);
 
        cacheline_acquire(cl);
        tl->hwsp_cacheline = cl;
index 2da3668..5726cd0 100644 (file)
@@ -404,7 +404,7 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
                                struct i915_wa_list *wal)
 {
-       struct drm_i915_private *i915 = engine->i915;
+       struct intel_gt *gt = engine->gt;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
@@ -415,7 +415,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
-               if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
+               if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
                        continue;
 
                /*
@@ -424,7 +424,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
                 *
                 * ->    0 <= ss <= 3;
                 */
-               ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+               ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }
 
@@ -1036,7 +1036,7 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 static void
 wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
        unsigned int slice, subslice;
        u32 l3_en, mcr, mcr_mask;
 
@@ -1649,11 +1649,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                            GEN7_SARCHKMD,
                            GEN7_DISABLE_SAMPLER_PREFETCH);
 
-               /* Wa_1407928979:tgl */
-               wa_write_or(wal,
-                           GEN7_FF_THREAD_MODE,
-                           GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
-
                /* Wa_1408615072:tgl */
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                            VSUNIT_CLKGATE_DIS_TGL);
@@ -1677,6 +1672,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                 * Wa_14010229206:tgl
                 */
                wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+               /*
+                * Wa_1407928979:tgl A*
+                * Wa_18011464164:tgl B0+
+                * Wa_22010931296:tgl B0+
+                */
+               wa_write_or(wal, GEN7_FF_THREAD_MODE,
+                           GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
        }
 
        if (IS_GEN(i915, 11)) {
index daa4aab..3fc5de9 100644 (file)
@@ -963,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
                goto out;
 
        if (i915_request_wait(head, 0,
-                             2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+                             2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
                pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
                       count, n);
                GEM_TRACE_DUMP();
@@ -3569,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
        }
 
        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
-               count, flags,
-               RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+               count, flags, smoke->gt->info.num_engines, smoke->ncontext);
        return 0;
 }
 
@@ -3597,8 +3596,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
        } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
 
        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
-               count, flags,
-               RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+               count, flags, smoke->gt->info.num_engines, smoke->ncontext);
        return 0;
 }
 
index 3c84348..64ef5ee 100644 (file)
@@ -233,7 +233,7 @@ int live_rc6_ctx_wa(void *arg)
                            i915_reset_engine_count(error, engine)) {
                                pr_err("%s: GPU reset required\n",
                                       engine->name);
-                               add_taint_for_CI(TAINT_WARN);
+                               add_taint_for_CI(gt->i915, TAINT_WARN);
                                err = -EIO;
                                goto out;
                        }
index bb753f0..8624f5d 100644 (file)
@@ -29,9 +29,9 @@ static int cmp_u64(const void *A, const void *B)
 {
        const u64 *a = A, *b = B;
 
-       if (a < b)
+       if (*a < *b)
                return -1;
-       else if (a > b)
+       else if (*a > *b)
                return 1;
        else
                return 0;
@@ -41,9 +41,9 @@ static int cmp_u32(const void *A, const void *B)
 {
        const u32 *a = A, *b = B;
 
-       if (a < b)
+       if (*a < *b)
                return -1;
-       else if (a > b)
+       else if (*a > *b)
                return 1;
        else
                return 0;
index fcdee95..fb5b7d3 100644 (file)
@@ -562,8 +562,9 @@ static int live_hwsp_engine(void *arg)
                struct intel_timeline *tl = timelines[n];
 
                if (!err && *tl->hwsp_seqno != n) {
-                       pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
-                              n, *tl->hwsp_seqno);
+                       pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+                              n, tl->hwsp_offset, *tl->hwsp_seqno);
+                       GEM_TRACE_DUMP();
                        err = -EINVAL;
                }
                intel_timeline_put(tl);
@@ -633,8 +634,9 @@ out:
                struct intel_timeline *tl = timelines[n];
 
                if (!err && *tl->hwsp_seqno != n) {
-                       pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
-                              n, *tl->hwsp_seqno);
+                       pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+                              n, tl->hwsp_offset, *tl->hwsp_seqno);
+                       GEM_TRACE_DUMP();
                        err = -EINVAL;
                }
                intel_timeline_put(tl);
@@ -965,8 +967,9 @@ static int live_hwsp_recycle(void *arg)
                        }
 
                        if (*tl->hwsp_seqno != count) {
-                               pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
-                                      count, *tl->hwsp_seqno);
+                               pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+                                      count, tl->hwsp_offset, *tl->hwsp_seqno);
+                               GEM_TRACE_DUMP();
                                err = -EINVAL;
                        }
 
index 1017280..d440610 100644 (file)
@@ -67,7 +67,7 @@ struct __guc_ads_blob {
 
 static void __guc_ads_init(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+       struct intel_gt *gt = guc_to_gt(guc);
        struct __guc_ads_blob *blob = guc->ads_blob;
        const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
        u32 base;
@@ -99,13 +99,13 @@ static void __guc_ads_init(struct intel_guc *guc)
        }
 
        /* System info */
-       blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+       blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask);
        blob->system_info.rcs_enabled = 1;
        blob->system_info.bcs_enabled = 1;
 
-       blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
-       blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
-       blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+       blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt);
+       blob->system_info.vebox_enable_mask = VEBOX_MASK(gt);
+       blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access;
 
        base = intel_guc_ggtt_offset(guc, guc->ads_vma);
 
index 1c2d635..d6f55f7 100644 (file)
@@ -267,8 +267,17 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
        GEM_BUG_ON(!intel_uc_wants_guc(uc));
 
        err = intel_uc_fw_fetch(&uc->guc.fw);
-       if (err)
+       if (err) {
+               /* Make sure we transition out of transient "SELECTED" state */
+               if (intel_uc_wants_huc(uc)) {
+                       drm_dbg(&uc_to_gt(uc)->i915->drm,
+                               "Failed to fetch GuC: %d disabling HuC\n", err);
+                       intel_uc_fw_change_status(&uc->huc.fw,
+                                                 INTEL_UC_FIRMWARE_ERROR);
+               }
+
                return;
+       }
 
        if (intel_uc_wants_huc(uc))
                intel_uc_fw_fetch(&uc->huc.fw);
index 9d16b78..089d986 100644 (file)
@@ -4,14 +4,41 @@
  */
 
 #include <linux/debugfs.h>
+#include <drm/drm_print.h>
 
+#include "gt/debugfs_gt.h"
 #include "intel_guc_debugfs.h"
 #include "intel_huc_debugfs.h"
 #include "intel_uc.h"
 #include "intel_uc_debugfs.h"
 
+static int uc_usage_show(struct seq_file *m, void *data)
+{
+       struct intel_uc *uc = m->private;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
+                  yesno(intel_uc_supports_guc(uc)),
+                  yesno(intel_uc_wants_guc(uc)),
+                  yesno(intel_uc_uses_guc(uc)));
+       drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
+                  yesno(intel_uc_supports_huc(uc)),
+                  yesno(intel_uc_wants_huc(uc)),
+                  yesno(intel_uc_uses_huc(uc)));
+       drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
+                  yesno(intel_uc_supports_guc_submission(uc)),
+                  yesno(intel_uc_wants_guc_submission(uc)),
+                  yesno(intel_uc_uses_guc_submission(uc)));
+
+       return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+
 void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
 {
+       static const struct debugfs_gt_file files[] = {
+               { "usage", &uc_usage_fops, NULL },
+       };
        struct dentry *root;
 
        if (!gt_root)
@@ -25,6 +52,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
        if (IS_ERR(root))
                return;
 
+       intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
+
        intel_guc_debugfs_register(&uc->guc, root);
        intel_huc_debugfs_register(&uc->huc, root);
 }
index ec47d41..62e6a14 100644 (file)
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
        vreg = vgpu_vreg(param->vgpu, offset);
 
        if (preg != vreg) {
-               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               node = kmalloc(sizeof(*node), GFP_ATOMIC);
                if (!node)
                        return -ENOMEM;
 
index 26cae48..63bba7b 100644 (file)
@@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                        gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
                        vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
                }
-               engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
+               engine_mask &= vgpu->gvt->gt->info.engine_mask;
        }
 
        /* vgpu_lock already hold by emulate mmio r/w */
@@ -1729,14 +1729,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(1)) {
+       if (IS_MASKED_BITS_ENABLED(data, 1)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
 
        if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
             IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
-           data & _MASKED_BIT_ENABLE(2)) {
+           IS_MASKED_BITS_ENABLED(data, 2)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
@@ -1745,14 +1745,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         * pvinfo, if not, we will treat this guest as non-gvtg-aware
         * guest, and stop emulating its cfg space, mmio, gtt, etc.
         */
-       if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-                       (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-                       && !vgpu->pv_notified) {
+       if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+           IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+           !vgpu->pv_notified) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
-       if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-                       || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+       if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+           IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
                enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
                gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1813,7 +1813,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
-       if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+       if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
                data |= RESET_CTL_READY_TO_RESET;
        else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
                data &= ~RESET_CTL_READY_TO_RESET;
@@ -1831,7 +1831,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
        (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+       if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+           IS_MASKED_BITS_ENABLED(data, 0x8))
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
        return 0;
@@ -1867,7 +1868,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
        MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
        MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
        MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
-       if (HAS_ENGINE(dev_priv, VCS1)) \
+       if (HAS_ENGINE(gvt->gt, VCS1)) \
                MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
 } while (0)
 
@@ -3059,6 +3060,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+       MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
        MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
        MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3135,8 +3137,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                 NULL, NULL);
 
-       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
-       MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+       MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
        return 0;
 }
index 540017f..7498878 100644 (file)
@@ -540,7 +540,7 @@ static void gen8_init_irq(
        SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
        SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
 
-       if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
+       if (HAS_ENGINE(gvt->gt, VCS1)) {
                SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
                        INTEL_GVT_IRQ_INFO_GT1);
                SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
index 2ccaf78..86a60bd 100644 (file)
@@ -171,7 +171,7 @@ static void load_render_mocs(const struct intel_engine_cs *engine)
                return;
 
        for (ring_id = 0; ring_id < cnt; ring_id++) {
-               if (!HAS_ENGINE(engine->i915, ring_id))
+               if (!HAS_ENGINE(engine->gt, ring_id))
                        continue;
 
                offset.reg = regs[ring_id];
index 970704b..3b25e7f 100644 (file)
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a)  \
-       (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
-       ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+       IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
 
 #endif
index 5b66e14..b88e033 100644 (file)
 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
                ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
 
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+               (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+               ((_val) & _MASKED_BIT_DISABLE(_b))
+
 #define FORCEWAKE_RENDER_GEN9_REG 0xa278
 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188
index 9ca94a4..7842199 100644 (file)
 #include "gem/i915_gem_context.h"
 #include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
 #include "gt/intel_rc6.h"
 #include "gt/intel_rps.h"
+#include "gt/intel_sseu_debugfs.h"
 
 #include "i915_debugfs.h"
 #include "i915_debugfs_params.h"
@@ -61,6 +63,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
        intel_device_info_print_static(INTEL_INFO(i915), &p);
        intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+       intel_gt_info_print(&i915->gt.info, &p);
        intel_driver_caps_print(&i915->caps, &p);
 
        kernel_param_lock(THIS_MODULE);
@@ -492,6 +495,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
+               if (HAS_MASTER_UNIT_IRQ(dev_priv))
+                       seq_printf(m, "Master Unit Interrupt Control:  %08x\n",
+                                  I915_READ(DG1_MSTR_UNIT_INTR));
+
                seq_printf(m, "Master Interrupt Control:  %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));
 
@@ -1138,13 +1145,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        struct intel_uncore *uncore = &dev_priv->uncore;
        intel_wakeref_t wakeref;
 
-       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
 
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               seq_puts(m, "L-shaped memory detected\n");
+
+       /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
+       if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
+               return 0;
+
+       wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
        if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           intel_uncore_read(uncore, DCC));
@@ -1173,9 +1187,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           intel_uncore_read(uncore, DISP_ARB_CTL));
        }
 
-       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
-               seq_puts(m, "L-shaped memory detected\n");
-
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
        return 0;
@@ -1316,16 +1327,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
        return 0;
 }
 
-static int i915_rcs_topology(struct seq_file *m, void *unused)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_printer p = drm_seq_file_printer(m);
-
-       intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
-
-       return 0;
-}
-
 static int i915_shrinker_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1572,264 +1573,16 @@ i915_cache_sharing_set(void *data, u64 val)
        return 0;
 }
 
-static void
-intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
-                         u8 *to_mask)
-{
-       int offset = slice * sseu->ss_stride;
-
-       memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
 
-static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
-                                         struct sseu_dev_info *sseu)
-{
-#define SS_MAX 2
-       const int ss_max = SS_MAX;
-       u32 sig1[SS_MAX], sig2[SS_MAX];
-       int ss;
-
-       sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
-       sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
-       sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
-       sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
-       for (ss = 0; ss < ss_max; ss++) {
-               unsigned int eu_cnt;
-
-               if (sig1[ss] & CHV_SS_PG_ENABLE)
-                       /* skip disabled subslice */
-                       continue;
-
-               sseu->slice_mask = BIT(0);
-               sseu->subslice_mask[0] |= BIT(ss);
-               eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
-                        ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
-                        ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
-                        ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-               sseu->eu_total += eu_cnt;
-               sseu->eu_per_subslice = max_t(unsigned int,
-                                             sseu->eu_per_subslice, eu_cnt);
-       }
-#undef SS_MAX
-}
-
-static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
-                                    struct sseu_dev_info *sseu)
-{
-#define SS_MAX 6
-       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
-       u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
-       int s, ss;
-
-       for (s = 0; s < info->sseu.max_slices; s++) {
-               /*
-                * FIXME: Valid SS Mask respects the spec and read
-                * only valid bits for those registers, excluding reserved
-                * although this seems wrong because it would leave many
-                * subslices without ACK.
-                */
-               s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
-                       GEN10_PGCTL_VALID_SS_MASK(s);
-               eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
-               eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
-       }
-
-       eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
-                    GEN9_PGCTL_SSA_EU19_ACK |
-                    GEN9_PGCTL_SSA_EU210_ACK |
-                    GEN9_PGCTL_SSA_EU311_ACK;
-       eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
-                    GEN9_PGCTL_SSB_EU19_ACK |
-                    GEN9_PGCTL_SSB_EU210_ACK |
-                    GEN9_PGCTL_SSB_EU311_ACK;
-
-       for (s = 0; s < info->sseu.max_slices; s++) {
-               if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
-                       /* skip disabled slice */
-                       continue;
-
-               sseu->slice_mask |= BIT(s);
-               intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
-
-               for (ss = 0; ss < info->sseu.max_subslices; ss++) {
-                       unsigned int eu_cnt;
-
-                       if (info->sseu.has_subslice_pg &&
-                           !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-                               /* skip disabled subslice */
-                               continue;
-
-                       eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
-                                              eu_mask[ss % 2]);
-                       sseu->eu_total += eu_cnt;
-                       sseu->eu_per_subslice = max_t(unsigned int,
-                                                     sseu->eu_per_subslice,
-                                                     eu_cnt);
-               }
-       }
-#undef SS_MAX
-}
-
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
-                                   struct sseu_dev_info *sseu)
-{
-#define SS_MAX 3
-       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
-       u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
-       int s, ss;
-
-       for (s = 0; s < info->sseu.max_slices; s++) {
-               s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
-               eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
-               eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
-       }
-
-       eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
-                    GEN9_PGCTL_SSA_EU19_ACK |
-                    GEN9_PGCTL_SSA_EU210_ACK |
-                    GEN9_PGCTL_SSA_EU311_ACK;
-       eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
-                    GEN9_PGCTL_SSB_EU19_ACK |
-                    GEN9_PGCTL_SSB_EU210_ACK |
-                    GEN9_PGCTL_SSB_EU311_ACK;
-
-       for (s = 0; s < info->sseu.max_slices; s++) {
-               if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
-                       /* skip disabled slice */
-                       continue;
-
-               sseu->slice_mask |= BIT(s);
-
-               if (IS_GEN9_BC(dev_priv))
-                       intel_sseu_copy_subslices(&info->sseu, s,
-                                                 sseu->subslice_mask);
-
-               for (ss = 0; ss < info->sseu.max_subslices; ss++) {
-                       unsigned int eu_cnt;
-                       u8 ss_idx = s * info->sseu.ss_stride +
-                                   ss / BITS_PER_BYTE;
-
-                       if (IS_GEN9_LP(dev_priv)) {
-                               if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-                                       /* skip disabled subslice */
-                                       continue;
-
-                               sseu->subslice_mask[ss_idx] |=
-                                       BIT(ss % BITS_PER_BYTE);
-                       }
-
-                       eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
-                                              eu_mask[ss%2]);
-                       sseu->eu_total += eu_cnt;
-                       sseu->eu_per_subslice = max_t(unsigned int,
-                                                     sseu->eu_per_subslice,
-                                                     eu_cnt);
-               }
-       }
-#undef SS_MAX
-}
-
-static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
-                                  struct sseu_dev_info *sseu)
-{
-       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
-       u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
-       int s;
-
-       sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
-
-       if (sseu->slice_mask) {
-               sseu->eu_per_subslice = info->sseu.eu_per_subslice;
-               for (s = 0; s < fls(sseu->slice_mask); s++)
-                       intel_sseu_copy_subslices(&info->sseu, s,
-                                                 sseu->subslice_mask);
-               sseu->eu_total = sseu->eu_per_subslice *
-                                intel_sseu_subslice_total(sseu);
-
-               /* subtract fused off EU(s) from enabled slice(s) */
-               for (s = 0; s < fls(sseu->slice_mask); s++) {
-                       u8 subslice_7eu = info->sseu.subslice_7eu[s];
-
-                       sseu->eu_total -= hweight8(subslice_7eu);
-               }
-       }
-}
-
-static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
-                                const struct sseu_dev_info *sseu)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       const char *type = is_available_info ? "Available" : "Enabled";
-       int s;
-
-       seq_printf(m, "  %s Slice Mask: %04x\n", type,
-                  sseu->slice_mask);
-       seq_printf(m, "  %s Slice Total: %u\n", type,
-                  hweight8(sseu->slice_mask));
-       seq_printf(m, "  %s Subslice Total: %u\n", type,
-                  intel_sseu_subslice_total(sseu));
-       for (s = 0; s < fls(sseu->slice_mask); s++) {
-               seq_printf(m, "  %s Slice%i subslices: %u\n", type,
-                          s, intel_sseu_subslices_per_slice(sseu, s));
-       }
-       seq_printf(m, "  %s EU Total: %u\n", type,
-                  sseu->eu_total);
-       seq_printf(m, "  %s EU Per Subslice: %u\n", type,
-                  sseu->eu_per_subslice);
-
-       if (!is_available_info)
-               return;
-
-       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
-       if (HAS_POOLED_EU(dev_priv))
-               seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
-
-       seq_printf(m, "  Has Slice Power Gating: %s\n",
-                  yesno(sseu->has_slice_pg));
-       seq_printf(m, "  Has Subslice Power Gating: %s\n",
-                  yesno(sseu->has_subslice_pg));
-       seq_printf(m, "  Has EU Power Gating: %s\n",
-                  yesno(sseu->has_eu_pg));
-}
-
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
-       struct sseu_dev_info sseu;
-       intel_wakeref_t wakeref;
-
-       if (INTEL_GEN(dev_priv) < 8)
-               return -ENODEV;
-
-       seq_puts(m, "SSEU Device Info\n");
-       i915_print_sseu_info(m, true, &info->sseu);
-
-       seq_puts(m, "SSEU Device Status\n");
-       memset(&sseu, 0, sizeof(sseu));
-       intel_sseu_set_info(&sseu, info->sseu.max_slices,
-                           info->sseu.max_subslices,
-                           info->sseu.max_eus_per_subslice);
-
-       with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-               if (IS_CHERRYVIEW(dev_priv))
-                       cherryview_sseu_device_status(dev_priv, &sseu);
-               else if (IS_BROADWELL(dev_priv))
-                       bdw_sseu_device_status(dev_priv, &sseu);
-               else if (IS_GEN(dev_priv, 9))
-                       gen9_sseu_device_status(dev_priv, &sseu);
-               else if (INTEL_GEN(dev_priv) >= 10)
-                       gen10_sseu_device_status(dev_priv, &sseu);
-       }
-
-       i915_print_sseu_info(m, false, &sseu);
+       struct drm_i915_private *i915 = node_to_i915(m->private);
+       struct intel_gt *gt = &i915->gt;
 
-       return 0;
+       return intel_sseu_status(m, gt);
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
@@ -1876,7 +1629,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_llc", i915_llc, 0},
        {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
        {"i915_engine_info", i915_engine_info, 0},
-       {"i915_rcs_topology", i915_rcs_topology, 0},
        {"i915_shrinker_info", i915_shrinker_info, 0},
        {"i915_wa_registers", i915_wa_registers, 0},
        {"i915_sseu_status", i915_sseu_status, 0},
index 67102dc..5fd5af4 100644 (file)
@@ -531,13 +531,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev_priv);
 
-       intel_device_info_init_mmio(dev_priv);
-
-       intel_uncore_prune_mmio_domains(&dev_priv->uncore);
-
-       intel_uc_init_mmio(&dev_priv->gt.uc);
-
-       ret = intel_engines_init_mmio(&dev_priv->gt);
+       ret = intel_gt_init_mmio(&dev_priv->gt);
        if (ret)
                goto err_uncore;
 
@@ -890,6 +884,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 
                intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
                intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+               intel_gt_info_print(&dev_priv->gt.info, &p);
        }
 
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
index 2c2e88d..e4f7f65 100644 (file)
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20200702"
-#define DRIVER_TIMESTAMP       1593714328
+#define DRIVER_DATE            "20200715"
+#define DRIVER_TIMESTAMP       1594811881
 
 struct drm_i915_gem_object;
 
@@ -442,6 +442,7 @@ struct intel_fbc {
                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
+                       u64 modifier;
                } fb;
 
                int cfb_size;
@@ -692,6 +693,7 @@ struct intel_vbt_data {
                bool initialized;
                int bpp;
                struct edp_power_seq pps;
+               bool hobl;
        } edp;
 
        struct {
@@ -1256,7 +1258,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
 
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
-       for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
+       for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
             (tmp__) ? \
             ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
             0;)
@@ -1430,6 +1432,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_ELKHARTLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
 #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
 #define IS_ROCKETLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
+#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv) \
@@ -1558,22 +1561,29 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_RKL_REVID(p, since, until) \
        (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
 
+#define DG1_REVID_A0           0x0
+#define DG1_REVID_B0           0x1
+
+#define IS_DG1_REVID(p, since, until) \
+       (IS_DG1(p) && IS_REVID(p, since, until))
+
 #define IS_LP(dev_priv)        (INTEL_INFO(dev_priv)->is_lp)
 #define IS_GEN9_LP(dev_priv)   (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)   (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
-#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
 
-#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({               \
+#define ENGINE_INSTANCES_MASK(gt, first, count) ({             \
        unsigned int first__ = (first);                                 \
        unsigned int count__ = (count);                                 \
-       (INTEL_INFO(dev_priv)->engine_mask &                            \
+       ((gt)->info.engine_mask &                                               \
         GENMASK(first__ + count__ - 1, first__)) >> first__;           \
 })
-#define VDBOX_MASK(dev_priv) \
-       ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(dev_priv) \
-       ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+#define VDBOX_MASK(gt) \
+       ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+       ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
 
 /*
  * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
@@ -1597,6 +1607,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
                (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
 
+#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
+
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
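
For reference, a minimal sketch of the bit arithmetic behind the reworked __HAS_ENGINE()/ENGINE_INSTANCES_MASK() helpers above, written as a self-contained userspace C snippet rather than driver code. BIT() and GENMASK() are redefined locally, and the engine bit positions used in main() are made up for the example, not taken from the driver's engine enum:

#include <stdio.h>

#define BIT(n)          (1u << (n))
/* GENMASK(h, l): bits h..l set, mirroring the kernel helper for 32-bit use */
#define GENMASK(h, l)   ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Same shape as ENGINE_INSTANCES_MASK(): take `count` consecutive engine
 * bits starting at `first` and shift them down so instance 0 is bit 0. */
static unsigned int instances_mask(unsigned int engine_mask,
                                   unsigned int first, unsigned int count)
{
        return (engine_mask & GENMASK(first + count - 1, first)) >> first;
}

int main(void)
{
        /* Hypothetical GT: render engine at bit 0, video engines at bits 2 and 4 */
        unsigned int engine_mask = BIT(0) | BIT(2) | BIT(4);

        /* Extract the video-engine instances, assuming they start at bit 2
         * and three instance slots are reserved: prints 0x5 (instances 0 and 2). */
        printf("vdbox-style mask: %#x\n", instances_mask(engine_mask, 2, 3));
        return 0;
}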
index 1753c84..f333e88 100644 (file)
@@ -72,7 +72,7 @@ struct drm_i915_private;
        trace_printk(__VA_ARGS__);                                      \
 } while (0)
 #define GEM_TRACE_DUMP() \
-       do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
+       do { ftrace_dump(DUMP_ALL); __add_taint_for_CI(TAINT_WARN); } while (0)
 #define GEM_TRACE_DUMP_ON(expr) \
        do { if (expr) GEM_TRACE_DUMP(); } while (0)
 #else
index cb43381..c5ee156 100644 (file)
@@ -31,6 +31,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
                                     pages->sgl, pages->nents,
                                     PCI_DMA_BIDIRECTIONAL,
+                                    DMA_ATTR_SKIP_CPU_SYNC |
+                                    DMA_ATTR_NO_KERNEL_MAPPING |
                                     DMA_ATTR_NO_WARN))
                        return 0;
 
index 40390b2..4216132 100644 (file)
@@ -12,7 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
        struct drm_i915_private *i915 = to_i915(dev);
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+       const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
        drm_i915_getparam_t *param = data;
        int value;
 
index 866166a..6a3a2ce 100644 (file)
@@ -42,6 +42,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
 #include "i915_drv.h"
@@ -425,7 +426,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 static void error_print_instdone(struct drm_i915_error_state_buf *m,
                                 const struct intel_engine_coredump *ee)
 {
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
+       const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
        int slice;
        int subslice;
 
@@ -619,16 +620,13 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
 }
 
 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
-                                  const struct intel_device_info *info,
-                                  const struct intel_runtime_info *runtime,
-                                  const struct intel_driver_caps *caps)
+                                  struct i915_gpu_coredump *error)
 {
        struct drm_printer p = i915_error_printer(m);
 
-       intel_device_info_print_static(info, &p);
-       intel_device_info_print_runtime(runtime, &p);
-       intel_device_info_print_topology(&runtime->sseu, &p);
-       intel_driver_caps_print(caps, &p);
+       intel_device_info_print_static(&error->device_info, &p);
+       intel_device_info_print_runtime(&error->runtime_info, &p);
+       intel_driver_caps_print(&error->driver_caps, &p);
 }
 
 static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -678,6 +676,15 @@ static void err_free_sgl(struct scatterlist *sgl)
        }
 }
 
+static void err_print_gt_info(struct drm_i915_error_state_buf *m,
+                             struct intel_gt_coredump *gt)
+{
+       struct drm_printer p = i915_error_printer(m);
+
+       intel_gt_info_print(&gt->info, &p);
+       intel_sseu_print_topology(&gt->info.sseu, &p);
+}
+
 static void err_print_gt(struct drm_i915_error_state_buf *m,
                         struct intel_gt_coredump *gt)
 {
@@ -734,6 +741,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
 
        if (gt->uc)
                err_print_uc(m, gt->uc);
+
+       err_print_gt_info(m, gt);
 }
 
 static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
@@ -798,8 +807,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
        if (error->display)
                intel_display_print_error_state(m, error->display);
 
-       err_print_capabilities(m, &error->device_info, &error->runtime_info,
-                              &error->driver_caps);
+       err_print_capabilities(m, error);
        err_print_params(m, &error->params);
 }
 
@@ -1630,6 +1638,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
        gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
 }
 
+static void gt_record_info(struct intel_gt_coredump *gt)
+{
+       memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+}
+
 /*
  * Generate a semi-unique error code. The code is not meant to have meaning. The
  * code's only purpose is to try to prevent false duplicated bug reports by
@@ -1808,6 +1821,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
                        return ERR_PTR(-ENOMEM);
                }
 
+               gt_record_info(error->gt);
                gt_record_engines(error->gt, compress);
 
                if (INTEL_INFO(i915)->has_gt_uc)
index 76b80fb..0220b09 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drm_mm.h>
 
 #include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
 #include "gt/uc/intel_uc_fw.h"
 
 #include "intel_device_info.h"
@@ -118,6 +119,8 @@ struct intel_gt_coredump {
        bool awake;
        bool simulated;
 
+       struct intel_gt_info info;
+
        /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
index 562b43e..1fa6770 100644 (file)
@@ -2584,6 +2584,46 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                                   gen11_master_intr_enable);
 }
 
+static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
+{
+       u32 val;
+
+       /* First disable interrupts */
+       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
+
+       /* Get the indication levels and ack the master unit */
+       val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
+       if (unlikely(!val))
+               return 0;
+
+       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
+
+       /*
+        * Now with master disabled, get a sample of level indications
+        * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
+        * out as this bit doesn't exist anymore for DG1
+        */
+       val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
+       if (unlikely(!val))
+               return 0;
+
+       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
+
+       return val;
+}
+
+static inline void dg1_master_intr_enable(void __iomem * const regs)
+{
+       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
+}
+
+static irqreturn_t dg1_irq_handler(int irq, void *arg)
+{
+       return __gen11_irq_handler(arg,
+                                  dg1_master_intr_disable_and_ack,
+                                  dg1_master_intr_enable);
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -2920,7 +2960,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
 
-       gen11_master_intr_disable(dev_priv->uncore.regs);
+       if (HAS_MASTER_UNIT_IRQ(dev_priv))
+               dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
+       else
+               gen11_master_intr_disable(dev_priv->uncore.regs);
 
        gen11_gt_irq_reset(&dev_priv->gt);
        gen11_display_irq_reset(dev_priv);
@@ -3071,7 +3114,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
        hotplug_irqs = sde_ddi_mask | sde_tc_mask;
        enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 
-       I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+       if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
+               I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3517,8 +3561,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
-       gen11_master_intr_enable(uncore->regs);
-       POSTING_READ(GEN11_GFX_MSTR_IRQ);
+       if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
+               dg1_master_intr_enable(uncore->regs);
+               POSTING_READ(DG1_MSTR_UNIT_INTR);
+       } else {
+               gen11_master_intr_enable(uncore->regs);
+               POSTING_READ(GEN11_GFX_MSTR_IRQ);
+       }
 }
 
 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4043,6 +4092,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
                else
                        return i8xx_irq_handler;
        } else {
+               if (HAS_MASTER_UNIT_IRQ(dev_priv))
+                       return dg1_irq_handler;
                if (INTEL_GEN(dev_priv) >= 11)
                        return gen11_irq_handler;
                else if (INTEL_GEN(dev_priv) >= 8)
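
As a rough illustration of the interrupt flow added above, the sketch below mirrors the disable, sample, ack, re-enable sequence of the new DG1 master-unit handler using plain C stubs. The accessors, the MASTER_IRQ_ENABLE bit and the pointer-to-u32 "register" are placeholders rather than the real MMIO layout, and a real status register keeps reporting pending bits even while the top-level enable is cleared, which a plain variable cannot emulate:

#include <stdint.h>

#define MASTER_IRQ_ENABLE       (1u << 31)      /* placeholder top-level enable bit */

static uint32_t reg_read(volatile uint32_t *reg)
{
        return *reg;
}

static void reg_write(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

/* Disable top-level delivery, sample the pending unit bits and ack them,
 * returning the sampled bits so the caller knows which units to service. */
static uint32_t master_intr_disable_and_ack(volatile uint32_t *master)
{
        uint32_t pending;

        reg_write(master, 0);           /* mask further delivery first */
        pending = reg_read(master);     /* sample the pending indications */
        if (!pending)
                return 0;               /* spurious interrupt, nothing to do */

        reg_write(master, pending);     /* ack exactly what was sampled */
        return pending;
}

static void master_intr_enable(volatile uint32_t *master)
{
        reg_write(master, MASTER_IRQ_ENABLE);   /* re-arm top-level delivery */
}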
index e5fdf17..2338f92 100644 (file)
        .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
-       .engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
        .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
-       .engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
@@ -217,6 +217,7 @@ static const struct intel_device_info i85x_info = {
 static const struct intel_device_info i865g_info = {
        I845_FEATURES,
        PLATFORM(INTEL_I865G),
+       .display.has_fbc = 1,
 };
 
 #define GEN3_FEATURES \
@@ -225,7 +226,7 @@ static const struct intel_device_info i865g_info = {
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
-       .engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
        .dma_mask_size = 32, \
@@ -316,7 +317,7 @@ static const struct intel_device_info pnv_m_info = {
        .display.has_hotplug = 1, \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
-       .engine_mask = BIT(RCS0), \
+       .platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
        .dma_mask_size = 36, \
@@ -348,7 +349,7 @@ static const struct intel_device_info i965gm_info = {
 static const struct intel_device_info g45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_G45),
-       .engine_mask = BIT(RCS0) | BIT(VCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
 };
 
@@ -358,7 +359,7 @@ static const struct intel_device_info gm45_info = {
        .is_mobile = 1,
        .display.has_fbc = 1,
        .display.supports_tv = 1,
-       .engine_mask = BIT(RCS0) | BIT(VCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
 };
 
@@ -367,7 +368,7 @@ static const struct intel_device_info gm45_info = {
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
-       .engine_mask = BIT(RCS0) | BIT(VCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
        /* ilk does support rc6, but we do not implement [power] contexts */ \
@@ -397,7 +398,7 @@ static const struct intel_device_info ilk_m_info = {
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
        .display.has_fbc = 1, \
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
@@ -448,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = {
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
        .display.has_hotplug = 1, \
        .display.has_fbc = 1, \
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
@@ -519,7 +520,7 @@ static const struct intel_device_info vlv_info = {
        .ppgtt_size = 31,
        .has_snoop = true,
        .has_coherent_ggtt = false,
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
        .display_mmio_offset = VLV_DISPLAY_BASE,
        I9XX_PIPE_OFFSETS,
        I9XX_CURSOR_OFFSETS,
@@ -530,7 +531,7 @@ static const struct intel_device_info vlv_info = {
 
 #define G75_FEATURES  \
        GEN7_FEATURES, \
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
        .display.has_ddi = 1, \
@@ -597,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = {
 static const struct intel_device_info bdw_gt3_info = {
        BDW_PLATFORM,
        .gt = 3,
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -608,7 +609,7 @@ static const struct intel_device_info chv_info = {
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
        .display.has_hotplug = 1,
        .is_lp = 1,
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
        .has_64bit_reloc = 1,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
@@ -661,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = {
 
 #define SKL_GT3_PLUS_PLATFORM \
        SKL_PLATFORM, \
-       .engine_mask = \
+       .platform_engine_mask = \
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
 
 
@@ -680,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = {
        .is_lp = 1, \
        .num_supported_dbuf_slices = 1, \
        .display.has_hotplug = 1, \
-       .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+       .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
@@ -743,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = {
 static const struct intel_device_info kbl_gt3_info = {
        KBL_PLATFORM,
        .gt = 3,
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -764,7 +765,7 @@ static const struct intel_device_info cfl_gt2_info = {
 static const struct intel_device_info cfl_gt3_info = {
        CFL_PLATFORM,
        .gt = 3,
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
 };
 
@@ -833,7 +834,7 @@ static const struct intel_device_info cnl_info = {
 static const struct intel_device_info icl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ICELAKE),
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
@@ -841,7 +842,7 @@ static const struct intel_device_info ehl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ELKHARTLAKE),
        .require_force_probe = 1,
-       .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+       .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
        .ppgtt_size = 36,
 };
 
@@ -877,7 +878,7 @@ static const struct intel_device_info tgl_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_TIGERLAKE),
        .display.has_modular_fia = 1,
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
@@ -890,14 +891,26 @@ static const struct intel_device_info rkl_info = {
                BIT(TRANSCODER_C),
        .require_force_probe = 1,
        .display.has_psr_hw_tracking = 0,
-       .engine_mask =
+       .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
 };
 
 #define GEN12_DGFX_FEATURES \
        GEN12_FEATURES, \
+       .memory_regions = REGION_SMEM | REGION_LMEM, \
+       .has_master_unit_irq = 1, \
        .is_dgfx = 1
 
+static const struct intel_device_info dg1_info __maybe_unused = {
+       GEN12_DGFX_FEATURES,
+       PLATFORM(INTEL_DG1),
+       .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+       .require_force_probe = 1,
+       .platform_engine_mask =
+               BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
+               BIT(VCS0) | BIT(VCS2),
+};
+
 #undef GEN
 #undef PLATFORM
 
index 25329b7..c6f6370 100644 (file)
@@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
        u32 d;
 
        cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+       cmd |= MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(stream->perf->i915) >= 8)
                cmd++;
 
@@ -1772,7 +1773,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
        GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
 
        i915_gem_object_flush_map(bo);
-       i915_gem_object_unpin_map(bo);
+       __i915_gem_object_release_map(bo);
 
        stream->noa_wait = vma;
        return 0;
@@ -1867,7 +1868,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
        *cs++ = 0;
 
        i915_gem_object_flush_map(obj);
-       i915_gem_object_unpin_map(obj);
+       __i915_gem_object_release_map(obj);
 
        oa_bo->vma = i915_vma_instance(obj,
                                       &stream->engine->gt->ggtt->vm,
@@ -2196,7 +2197,7 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
                if (!intel_context_pin_if_active(ce))
                        continue;
 
-               flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+               flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
                err = gen8_modify_context(ce, flex, count);
 
                intel_context_unpin(ce);
@@ -2340,7 +2341,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
                if (engine->class != RENDER_CLASS)
                        continue;
 
-               regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+               regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
 
                err = gen8_modify_self(ce, regs, num_regs, active);
                if (err)
@@ -2740,8 +2741,7 @@ static void
 get_default_sseu_config(struct intel_sseu *out_sseu,
                        struct intel_engine_cs *engine)
 {
-       const struct sseu_dev_info *devinfo_sseu =
-               &RUNTIME_INFO(engine->i915)->sseu;
+       const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
 
        *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
 
@@ -2766,7 +2766,7 @@ get_sseu_config(struct intel_sseu *out_sseu,
            drm_sseu->engine.engine_instance != engine->uabi_instance)
                return -EINVAL;
 
-       return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
+       return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
 }
 
 /**
index c1ebda9..fed337a 100644 (file)
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
 static int query_topology_info(struct drm_i915_private *dev_priv,
                               struct drm_i915_query_item *query_item)
 {
-       const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+       const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
        struct drm_i915_query_topology_info topo;
        u32 slice_length, subslice_length, eu_length, total_length;
        int ret;
index 9d6536a..4e796ff 100644 (file)
@@ -868,7 +868,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define OAREPORTTRIG1 _MMIO(0x2740)
 #define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG2 _MMIO(0x2744)
 #define OAREPORTTRIG2_INVERT_A_0  (1 << 0)
@@ -921,7 +921,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define OAREPORTTRIG5 _MMIO(0x2750)
 #define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG6 _MMIO(0x2754)
 #define OAREPORTTRIG6_INVERT_A_0  (1 << 0)
@@ -1974,6 +1974,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define ICL_PORT_PCS_DW1_AUX(phy)      _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
 #define ICL_PORT_PCS_DW1_GRP(phy)      _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
 #define ICL_PORT_PCS_DW1_LN0(phy)      _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define   DCC_MODE_SELECT_MASK         (0x3 << 20)
+#define   DCC_MODE_SELECT_CONTINUOSLY  (0x3 << 20)
 #define   COMMON_KEEPER_EN             (1 << 26)
 #define   LATENCY_OPTIM_MASK           (0x3 << 2)
 #define   LATENCY_OPTIM_VAL(x)         ((x) << 2)
@@ -2072,6 +2074,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   N_SCALAR(x)                  ((x) << 24)
 #define   N_SCALAR_MASK                        (0x7F << 24)
 
+#define ICL_PORT_TX_DW8_AUX(phy)               _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
+#define ICL_PORT_TX_DW8_GRP(phy)               _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
+#define ICL_PORT_TX_DW8_LN0(phy)               _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define   ICL_PORT_TX_DW8_ODCC_CLK_SEL         REG_BIT(31)
+#define   ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK        REG_GENMASK(30, 29)
+#define   ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2        REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
+
 #define _ICL_DPHY_CHKN_REG                     0x194
 #define ICL_DPHY_CHKN(port)                    _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
 #define   ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP     REG_BIT(7)
@@ -2827,6 +2836,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define VLV_GU_CTL0    _MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1    _MMIO(VLV_DISPLAY_BASE + 0x2034)
 #define SCPD0          _MMIO(0x209c) /* 915+ only */
+#define  SCPD_FBC_IGNORE_3D                    (1 << 6)
 #define  CSTATE_RENDER_CLOCK_GATE_DISABLE      (1 << 5)
 #define GEN2_IER       _MMIO(0x20a0)
 #define GEN2_IIR       _MMIO(0x20a4)
@@ -7723,6 +7733,10 @@ enum {
 #define  GEN11_GT_DW1_IRQ              (1 << 1)
 #define  GEN11_GT_DW0_IRQ              (1 << 0)
 
+#define DG1_MSTR_UNIT_INTR             _MMIO(0x190008)
+#define   DG1_MSTR_IRQ                 REG_BIT(31)
+#define   DG1_MSTR_UNIT(u)             REG_BIT(u)
+
 #define GEN11_DISPLAY_INT_CTL          _MMIO(0x44200)
 #define  GEN11_DISPLAY_IRQ_ENABLE      (1 << 31)
 #define  GEN11_AUDIO_CODEC_IRQ         (1 << 24)
index 3bb7320..0b2fe55 100644 (file)
@@ -560,22 +560,25 @@ bool __i915_request_submit(struct i915_request *request)
        engine->serial++;
        result = true;
 
-xfer:  /* We may be recursing from the signal callback of another i915 fence */
-       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+xfer:
        if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
                list_move_tail(&request->sched.link, &engine->active.requests);
                clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
-               __notify_execute_cb(request);
        }
-       GEM_BUG_ON(!llist_empty(&request->execute_cb));
 
-       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
-           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
-           !i915_request_enable_breadcrumb(request))
-               intel_engine_signal_breadcrumbs(engine);
+       /* We may be recursing from the signal callback of another i915 fence */
+       if (!i915_request_signaled(request)) {
+               spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
+               __notify_execute_cb(request);
+               if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                            &request->fence.flags) &&
+                   !i915_request_enable_breadcrumb(request))
+                       intel_engine_signal_breadcrumbs(engine);
 
-       spin_unlock(&request->lock);
+               spin_unlock(&request->lock);
+               GEM_BUG_ON(!llist_empty(&request->execute_cb));
+       }
 
        return result;
 }
index f42a9e9..4c305d8 100644 (file)
@@ -49,6 +49,16 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        }
 }
 
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
+{
+       __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
+                     taint, (void *)_RET_IP_);
+
+       /* Failures that occur during fault injection testing are expected */
+       if (!i915_error_injected())
+               __add_taint_for_CI(taint);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 static unsigned int i915_probe_fail_count;
 
index 03a73d2..5477337 100644 (file)
@@ -266,19 +266,6 @@ static inline int list_is_last_rcu(const struct list_head *list,
        return READ_ONCE(list->next) == head;
 }
 
-/*
- * Wait until the work is finally complete, even if it tries to postpone
- * by requeueing itself. Note, that if the worker never cancels itself,
- * we will spin forever.
- */
-static inline void drain_delayed_work(struct delayed_work *dw)
-{
-       do {
-               while (flush_delayed_work(dw))
-                       ;
-       } while (delayed_work_pending(dw));
-}
-
 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
 {
        unsigned long j = msecs_to_jiffies(m);
@@ -436,7 +423,8 @@ static inline const char *enableddisabled(bool v)
        return v ? "enabled" : "disabled";
 }
 
-static inline void add_taint_for_CI(unsigned int taint)
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
+static inline void __add_taint_for_CI(unsigned int taint)
 {
        /*
         * The system is "ok", just about surviving for the user, but
index 7fe1f31..bc64f77 100644 (file)
@@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
 {
+       struct i915_vma *pos = ERR_PTR(-E2BIG);
        struct i915_vma *vma;
        struct rb_node *rb, **p;
 
@@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj,
        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
-               struct i915_vma *pos;
                long cmp;
 
                rb = *p;
@@ -196,17 +196,12 @@ vma_create(struct drm_i915_gem_object *obj,
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
-               if (cmp == 0) {
-                       spin_unlock(&obj->vma.lock);
-                       i915_vm_put(vm);
-                       i915_vma_free(vma);
-                       return pos;
-               }
-
                if (cmp < 0)
                        p = &rb->rb_right;
-               else
+               else if (cmp > 0)
                        p = &rb->rb_left;
+               else
+                       goto err_unlock;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);
@@ -229,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj,
 err_unlock:
        spin_unlock(&obj->vma.lock);
 err_vma:
+       i915_vm_put(vm);
        i915_vma_free(vma);
-       return ERR_PTR(-E2BIG);
+       return pos;
 }
 
 static struct i915_vma *
@@ -308,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work)
        struct i915_vma *vma = vw->vma;
        int err;
 
-       err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+       err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
        if (err)
                atomic_or(I915_VMA_ERROR, &vma->flags);
 
@@ -411,7 +407,7 @@ int i915_vma_bind(struct i915_vma *vma,
 
                work->vma = vma;
                work->cache_level = cache_level;
-               work->flags = bind_flags | I915_VMA_ALLOC;
+               work->flags = bind_flags;
 
                /*
                 * Note we only want to chain up to the migration fence on
@@ -437,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
                        work->pinned = vma->obj;
                }
        } else {
-               ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+               ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
                if (ret)
                        return ret;
        }
@@ -1265,7 +1261,7 @@ void __i915_vma_evict(struct i915_vma *vma)
 
        if (likely(atomic_read(&vma->vm->open))) {
                trace_i915_vma_unbind(vma);
-               vma->ops->unbind_vma(vma);
+               vma->ops->unbind_vma(vma->vm, vma);
        }
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);
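
The vma_create() rework above funnels the "duplicate found" case through the common err_unlock/err_vma exit instead of open-coding the cleanup inside the tree walk. Below is a stripped-down sketch of that lookup-or-insert shape, using an ordinary binary search tree and made-up node and compare types in place of the driver's rbtree and i915_vma_compare():

#include <stdlib.h>

struct node {
        long key;
        struct node *left, *right;
};

/* Insert a freshly allocated `fresh` into the tree, or free it and return
 * the existing node when a duplicate key is found, mirroring the single
 * error/cleanup exit the patch introduces. */
static struct node *insert_or_reuse(struct node **root, struct node *fresh)
{
        struct node *pos = NULL;
        struct node **p = root;

        while (*p) {
                long cmp = fresh->key - (*p)->key;

                if (cmp < 0) {
                        p = &(*p)->left;
                } else if (cmp > 0) {
                        p = &(*p)->right;
                } else {
                        pos = *p;       /* duplicate: reuse the existing node */
                        goto err_free;
                }
        }

        *p = fresh;     /* link the new node at the insertion point */
        return fresh;

err_free:
        free(fresh);    /* dispose of the unused allocation, as vma_create() does */
        return pos;
}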
index 63831cd..9e9082d 100644 (file)
@@ -235,7 +235,6 @@ struct i915_vma {
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
 
 #define I915_VMA_ALLOC_BIT     12
-#define I915_VMA_ALLOC         ((int)BIT(I915_VMA_ALLOC_BIT))
 
 #define I915_VMA_ERROR_BIT     13
 #define I915_VMA_ERROR         ((int)BIT(I915_VMA_ERROR_BIT))
index 544ac61..40c590d 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/i915_pciids.h>
 
 #include "display/intel_cdclk.h"
+#include "display/intel_de.h"
 #include "intel_device_info.h"
 #include "i915_drv.h"
 
@@ -63,6 +64,7 @@ static const char * const platform_names[] = {
        PLATFORM_NAME(ELKHARTLAKE),
        PLATFORM_NAME(TIGERLAKE),
        PLATFORM_NAME(ROCKETLAKE),
+       PLATFORM_NAME(DG1),
 };
 #undef PLATFORM_NAME
 
@@ -91,7 +93,6 @@ static const char *iommu_name(void)
 void intel_device_info_print_static(const struct intel_device_info *info,
                                    struct drm_printer *p)
 {
-       drm_printf(p, "engines: %x\n", info->engine_mask);
        drm_printf(p, "gen: %d\n", info->gen);
        drm_printf(p, "gt: %d\n", info->gt);
        drm_printf(p, "iommu: %s\n", iommu_name());
@@ -111,571 +112,18 @@ void intel_device_info_print_static(const struct intel_device_info *info,
 #undef PRINT_FLAG
 }
 
-static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
-{
-       int s;
-
-       drm_printf(p, "slice total: %u, mask=%04x\n",
-                  hweight8(sseu->slice_mask), sseu->slice_mask);
-       drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
-       for (s = 0; s < sseu->max_slices; s++) {
-               drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
-                          s, intel_sseu_subslices_per_slice(sseu, s),
-                          intel_sseu_get_subslices(sseu, s));
-       }
-       drm_printf(p, "EU total: %u\n", sseu->eu_total);
-       drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
-       drm_printf(p, "has slice power gating: %s\n",
-                  yesno(sseu->has_slice_pg));
-       drm_printf(p, "has subslice power gating: %s\n",
-                  yesno(sseu->has_subslice_pg));
-       drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
-}
-
 void intel_device_info_print_runtime(const struct intel_runtime_info *info,
                                     struct drm_printer *p)
 {
-       sseu_dump(&info->sseu, p);
-
        drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
        drm_printf(p, "CS timestamp frequency: %u Hz\n",
                   info->cs_timestamp_frequency_hz);
 }
 
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
-                      int subslice)
-{
-       int slice_stride = sseu->max_subslices * sseu->eu_stride;
-
-       return slice * slice_stride + subslice * sseu->eu_stride;
-}
-
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
-                       int subslice)
-{
-       int i, offset = sseu_eu_idx(sseu, slice, subslice);
-       u16 eu_mask = 0;
-
-       for (i = 0; i < sseu->eu_stride; i++) {
-               eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
-                       (i * BITS_PER_BYTE);
-       }
-
-       return eu_mask;
-}
-
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
-                        u16 eu_mask)
-{
-       int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
-       for (i = 0; i < sseu->eu_stride; i++) {
-               sseu->eu_mask[offset + i] =
-                       (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
-       }
-}
-
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
-                                     struct drm_printer *p)
-{
-       int s, ss;
-
-       if (sseu->max_slices == 0) {
-               drm_printf(p, "Unavailable\n");
-               return;
-       }
-
-       for (s = 0; s < sseu->max_slices; s++) {
-               drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
-                          s, intel_sseu_subslices_per_slice(sseu, s),
-                          intel_sseu_get_subslices(sseu, s));
-
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       u16 enabled_eus = sseu_get_eus(sseu, s, ss);
-
-                       drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
-                                  ss, hweight16(enabled_eus), enabled_eus);
-               }
-       }
-}
-
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
-{
-       u16 i, total = 0;
-
-       for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
-               total += hweight8(sseu->eu_mask[i]);
-
-       return total;
-}
-
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
-                                   u8 s_en, u32 ss_en, u16 eu_en)
-{
-       int s, ss;
-
-       /* ss_en represents entire subslice mask across all slices */
-       GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
-                  sizeof(ss_en) * BITS_PER_BYTE);
-
-       for (s = 0; s < sseu->max_slices; s++) {
-               if ((s_en & BIT(s)) == 0)
-                       continue;
-
-               sseu->slice_mask |= BIT(s);
-
-               intel_sseu_set_subslices(sseu, s, ss_en);
-
-               for (ss = 0; ss < sseu->max_subslices; ss++)
-                       if (intel_sseu_has_subslice(sseu, s, ss))
-                               sseu_set_eus(sseu, s, ss, eu_en);
-       }
-       sseu->eu_per_subslice = hweight16(eu_en);
-       sseu->eu_total = compute_eu_total(sseu);
-}
-
-static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       u8 s_en;
-       u32 dss_en;
-       u16 eu_en = 0;
-       u8 eu_en_fuse;
-       int eu;
-
-       /*
-        * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
-        * Instead of splitting these, provide userspace with an array
-        * of DSS to more closely represent the hardware resource.
-        */
-       intel_sseu_set_info(sseu, 1, 6, 16);
-
-       s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
-
-       dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
-
-       /* one bit per pair of EUs */
-       eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
-       for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
-               if (eu_en_fuse & BIT(eu))
-                       eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
-
-       gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
-
-       /* TGL only supports slice-level power gating */
-       sseu->has_slice_pg = 1;
-}
-
-static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       u8 s_en;
-       u32 ss_en;
-       u8 eu_en;
-
-       if (IS_ELKHARTLAKE(dev_priv))
-               intel_sseu_set_info(sseu, 1, 4, 8);
-       else
-               intel_sseu_set_info(sseu, 1, 8, 8);
-
-       s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
-       ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
-       eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
-
-       gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
-
-       /* ICL has no power gating restrictions. */
-       sseu->has_slice_pg = 1;
-       sseu->has_subslice_pg = 1;
-       sseu->has_eu_pg = 1;
-}
-
-static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       const u32 fuse2 = I915_READ(GEN8_FUSE2);
-       int s, ss;
-       const int eu_mask = 0xff;
-       u32 subslice_mask, eu_en;
-
-       intel_sseu_set_info(sseu, 6, 4, 8);
-
-       sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
-                           GEN10_F2_S_ENA_SHIFT;
-
-       /* Slice0 */
-       eu_en = ~I915_READ(GEN8_EU_DISABLE0);
-       for (ss = 0; ss < sseu->max_subslices; ss++)
-               sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
-       /* Slice1 */
-       sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~I915_READ(GEN8_EU_DISABLE1);
-       sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
-       /* Slice2 */
-       sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
-       sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
-       /* Slice3 */
-       sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~I915_READ(GEN8_EU_DISABLE2);
-       sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
-       /* Slice4 */
-       sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
-       sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
-       /* Slice5 */
-       sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~I915_READ(GEN10_EU_DISABLE3);
-       sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
-
-       subslice_mask = (1 << 4) - 1;
-       subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
-                          GEN10_F2_SS_DIS_SHIFT);
-
-       for (s = 0; s < sseu->max_slices; s++) {
-               u32 subslice_mask_with_eus = subslice_mask;
-
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       if (sseu_get_eus(sseu, s, ss) == 0)
-                               subslice_mask_with_eus &= ~BIT(ss);
-               }
-
-               /*
-                * Slice0 can have up to 3 subslices, but there are only 2 in
-                * slice1/2.
-                */
-               intel_sseu_set_subslices(sseu, s, s == 0 ?
-                                                 subslice_mask_with_eus :
-                                                 subslice_mask_with_eus & 0x3);
-       }
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /*
-        * CNL is expected to always have a uniform distribution
-        * of EU across subslices with the exception that any one
-        * EU in any one subslice may be fused off for die
-        * recovery.
-        */
-       sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-                               DIV_ROUND_UP(sseu->eu_total,
-                                            intel_sseu_subslice_total(sseu)) :
-                               0;
-
-       /* No restrictions on Power Gating */
-       sseu->has_slice_pg = 1;
-       sseu->has_subslice_pg = 1;
-       sseu->has_eu_pg = 1;
-}
-
-static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       u32 fuse;
-       u8 subslice_mask = 0;
-
-       fuse = I915_READ(CHV_FUSE_GT);
-
-       sseu->slice_mask = BIT(0);
-       intel_sseu_set_info(sseu, 1, 2, 8);
-
-       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-               u8 disabled_mask =
-                       ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
-                        CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
-                       (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
-                         CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
-
-               subslice_mask |= BIT(0);
-               sseu_set_eus(sseu, 0, 0, ~disabled_mask);
-       }
-
-       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-               u8 disabled_mask =
-                       ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
-                        CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
-                       (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
-                         CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
-
-               subslice_mask |= BIT(1);
-               sseu_set_eus(sseu, 0, 1, ~disabled_mask);
-       }
-
-       intel_sseu_set_subslices(sseu, 0, subslice_mask);
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /*
-        * CHV expected to always have a uniform distribution of EU
-        * across subslices.
-       */
-       sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-                               sseu->eu_total /
-                                       intel_sseu_subslice_total(sseu) :
-                               0;
-       /*
-        * CHV supports subslice power gating on devices with more than
-        * one subslice, and supports EU power gating on devices with
-        * more than one EU pair per subslice.
-       */
-       sseu->has_slice_pg = 0;
-       sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
-       sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       int s, ss;
-       u32 fuse2, eu_disable, subslice_mask;
-       const u8 eu_mask = 0xff;
-
-       fuse2 = I915_READ(GEN8_FUSE2);
-       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-
-       /* BXT has a single slice and at most 3 subslices. */
-       intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
-                           IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-       */
-       subslice_mask = (1 << sseu->max_subslices) - 1;
-       subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
-                          GEN9_F2_SS_DIS_SHIFT);
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-       */
-       for (s = 0; s < sseu->max_slices; s++) {
-               if (!(sseu->slice_mask & BIT(s)))
-                       /* skip disabled slice */
-                       continue;
-
-               intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       int eu_per_ss;
-                       u8 eu_disabled_mask;
-
-                       if (!intel_sseu_has_subslice(sseu, s, ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
-
-                       sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
-                       eu_per_ss = sseu->max_eus_per_subslice -
-                               hweight8(eu_disabled_mask);
-
-                       /*
-                        * Record which subslice(s) has(have) 7 EUs. we
-                        * can tune the hash used to spread work among
-                        * subslices if they are unbalanced.
-                        */
-                       if (eu_per_ss == 7)
-                               sseu->subslice_7eu[s] |= BIT(ss);
-               }
-       }
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /*
-        * SKL is expected to always have a uniform distribution
-        * of EU across subslices with the exception that any one
-        * EU in any one subslice may be fused off for die
-        * recovery. BXT is expected to be perfectly uniform in EU
-        * distribution.
-       */
-       sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-                               DIV_ROUND_UP(sseu->eu_total,
-                                            intel_sseu_subslice_total(sseu)) :
-                               0;
-       /*
-        * SKL+ supports slice power gating on devices with more than
-        * one slice, and supports EU power gating on devices with
-        * more than one EU pair per subslice. BXT+ supports subslice
-        * power gating on devices with more than one subslice, and
-        * supports EU power gating on devices with more than one EU
-        * pair per subslice.
-       */
-       sseu->has_slice_pg =
-               !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
-       sseu->has_subslice_pg =
-               IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
-       sseu->has_eu_pg = sseu->eu_per_subslice > 2;
-
-       if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss)     (!(sseu->subslice_mask[0] & BIT(ss)))
-               info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
-
-               sseu->min_eu_in_pool = 0;
-               if (info->has_pooled_eu) {
-                       if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
-                               sseu->min_eu_in_pool = 3;
-                       else if (IS_SS_DISABLED(1))
-                               sseu->min_eu_in_pool = 6;
-                       else
-                               sseu->min_eu_in_pool = 9;
-               }
-#undef IS_SS_DISABLED
-       }
-}
-
-static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       int s, ss;
-       u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
-
-       fuse2 = I915_READ(GEN8_FUSE2);
-       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       intel_sseu_set_info(sseu, 3, 3, 8);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
-       subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
-                          GEN8_F2_SS_DIS_SHIFT);
-
-       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
-       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
-                        (32 - GEN8_EU_DIS0_S1_SHIFT));
-       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
-                        (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-        */
-       for (s = 0; s < sseu->max_slices; s++) {
-               if (!(sseu->slice_mask & BIT(s)))
-                       /* skip disabled slice */
-                       continue;
-
-               intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       u8 eu_disabled_mask;
-                       u32 n_disabled;
-
-                       if (!intel_sseu_has_subslice(sseu, s, ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       eu_disabled_mask =
-                               eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
-
-                       sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
-                       n_disabled = hweight8(eu_disabled_mask);
-
-                       /*
-                        * Record which subslices have 7 EUs.
-                        */
-                       if (sseu->max_eus_per_subslice - n_disabled == 7)
-                               sseu->subslice_7eu[s] |= 1 << ss;
-               }
-       }
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /*
-        * BDW is expected to always have a uniform distribution of EU across
-        * subslices with the exception that any one EU in any one subslice may
-        * be fused off for die recovery.
-        */
-       sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
-                               DIV_ROUND_UP(sseu->eu_total,
-                                            intel_sseu_subslice_total(sseu)) :
-                               0;
-
-       /*
-        * BDW supports slice power gating on devices with more than
-        * one slice.
-        */
-       sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
-       sseu->has_subslice_pg = 0;
-       sseu->has_eu_pg = 0;
-}
-
-static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
-       struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-       u32 fuse1;
-       u8 subslice_mask = 0;
-       int s, ss;
-
-       /*
-        * There isn't a register to tell us how many slices/subslices. We
-        * work off the PCI-ids here.
-        */
-       switch (INTEL_INFO(dev_priv)->gt) {
-       default:
-               MISSING_CASE(INTEL_INFO(dev_priv)->gt);
-               /* fall through */
-       case 1:
-               sseu->slice_mask = BIT(0);
-               subslice_mask = BIT(0);
-               break;
-       case 2:
-               sseu->slice_mask = BIT(0);
-               subslice_mask = BIT(0) | BIT(1);
-               break;
-       case 3:
-               sseu->slice_mask = BIT(0) | BIT(1);
-               subslice_mask = BIT(0) | BIT(1);
-               break;
-       }
-
-       fuse1 = I915_READ(HSW_PAVP_FUSE1);
-       switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
-       default:
-               MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
-                            HSW_F1_EU_DIS_SHIFT);
-               /* fall through */
-       case HSW_F1_EU_DIS_10EUS:
-               sseu->eu_per_subslice = 10;
-               break;
-       case HSW_F1_EU_DIS_8EUS:
-               sseu->eu_per_subslice = 8;
-               break;
-       case HSW_F1_EU_DIS_6EUS:
-               sseu->eu_per_subslice = 6;
-               break;
-       }
-
-       intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
-                           hweight8(subslice_mask),
-                           sseu->eu_per_subslice);
-
-       for (s = 0; s < sseu->max_slices; s++) {
-               intel_sseu_set_subslices(sseu, s, subslice_mask);
-
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       sseu_set_eus(sseu, s, ss,
-                                    (1UL << sseu->eu_per_subslice) - 1);
-               }
-       }
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /* No powergating for you. */
-       sseu->has_slice_pg = 0;
-       sseu->has_subslice_pg = 0;
-       sseu->has_eu_pg = 0;
-}
-
 static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
 {
-       u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
+       u32 ts_override = intel_uncore_read(&dev_priv->uncore,
+                                           GEN9_TIMESTAMP_OVERRIDE);
        u32 base_freq, frac_freq;
 
        base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
@@ -738,6 +186,7 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
 
 static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 {
+       struct intel_uncore *uncore = &dev_priv->uncore;
        u32 f12_5_mhz = 12500000;
        u32 f19_2_mhz = 19200000;
        u32 f24_mhz = 24000000;
@@ -759,7 +208,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
                 */
                return f12_5_mhz;
        } else if (INTEL_GEN(dev_priv) <= 9) {
-               u32 ctc_reg = I915_READ(CTC_MODE);
+               u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
                u32 freq = 0;
 
                if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
@@ -777,7 +226,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 
                return freq;
        } else if (INTEL_GEN(dev_priv) <= 12) {
-               u32 ctc_reg = I915_READ(CTC_MODE);
+               u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
                u32 freq = 0;
 
                /* First figure out the reference frequency. There are 2 ways
@@ -788,7 +237,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
                if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
                        freq = read_reference_ts_freq(dev_priv);
                } else {
-                       u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
+                       u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
 
                        if (INTEL_GEN(dev_priv) <= 10)
                                freq = gen10_get_crystal_clock_freq(dev_priv,
@@ -967,8 +416,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 
        if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
            HAS_PCH_SPLIT(dev_priv)) {
-               u32 fuse_strap = I915_READ(FUSE_STRAP);
-               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+               u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+               u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
 
                /*
                 * SFUSE_STRAP is supposed to have a bit signalling the display
@@ -993,7 +442,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
        } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
-               u32 dfsm = I915_READ(SKL_DFSM);
+               u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
 
                if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
                        info->pipe_mask &= ~BIT(PIPE_A);
@@ -1027,22 +476,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->display.has_dsc = 0;
        }
 
-       /* Initialize slice/subslice/EU info */
-       if (IS_HASWELL(dev_priv))
-               hsw_sseu_info_init(dev_priv);
-       else if (IS_CHERRYVIEW(dev_priv))
-               cherryview_sseu_info_init(dev_priv);
-       else if (IS_BROADWELL(dev_priv))
-               bdw_sseu_info_init(dev_priv);
-       else if (IS_GEN(dev_priv, 9))
-               gen9_sseu_info_init(dev_priv);
-       else if (IS_GEN(dev_priv, 10))
-               gen10_sseu_info_init(dev_priv);
-       else if (IS_GEN(dev_priv, 11))
-               gen11_sseu_info_init(dev_priv);
-       else if (INTEL_GEN(dev_priv) >= 12)
-               gen12_sseu_info_init(dev_priv);
-
        if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
                drm_info(&dev_priv->drm,
                         "Disabling ppGTT for VT-d support\n");
@@ -1073,67 +506,3 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
                   yesno(caps->has_logical_contexts));
        drm_printf(p, "scheduler: %x\n", caps->scheduler);
 }
-
-/*
- * Determine which engines are fused off in our particular hardware. Since the
- * fuse register is in the blitter powerwell, we need forcewake to be ready at
- * this point (but later we need to prune the forcewake domains for engines that
- * are indeed fused off).
- */
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
-{
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
-       unsigned int logical_vdbox = 0;
-       unsigned int i;
-       u32 media_fuse;
-       u16 vdbox_mask;
-       u16 vebox_mask;
-
-       if (INTEL_GEN(dev_priv) < 11)
-               return;
-
-       media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
-
-       vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-       vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-                     GEN11_GT_VEBOX_DISABLE_SHIFT;
-
-       for (i = 0; i < I915_MAX_VCS; i++) {
-               if (!HAS_ENGINE(dev_priv, _VCS(i))) {
-                       vdbox_mask &= ~BIT(i);
-                       continue;
-               }
-
-               if (!(BIT(i) & vdbox_mask)) {
-                       info->engine_mask &= ~BIT(_VCS(i));
-                       drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
-                       continue;
-               }
-
-               /*
-                * In Gen11, only even numbered logical VDBOXes are
-                * hooked up to an SFC (Scaler & Format Converter) unit.
-                * In TGL each VDBOX has access to an SFC.
-                */
-               if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
-                       RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
-       }
-       drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
-               vdbox_mask, VDBOX_MASK(dev_priv));
-       GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
-
-       for (i = 0; i < I915_MAX_VECS; i++) {
-               if (!HAS_ENGINE(dev_priv, _VECS(i))) {
-                       vebox_mask &= ~BIT(i);
-                       continue;
-               }
-
-               if (!(BIT(i) & vebox_mask)) {
-                       info->engine_mask &= ~BIT(_VECS(i));
-                       drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
-               }
-       }
-       drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
-               vebox_mask, VEBOX_MASK(dev_priv));
-       GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
-}
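
Aside for readers tracking the fuse decoding that leaves this file: a minimal stand-alone sketch, with made-up values and names, of how a disable fuse is turned into an engine enable mask and how only every other logical VDBOX is paired with an SFC on Gen11. The driver's own version is the code removed above; nothing below is taken from it verbatim.

    /* Hypothetical illustration only; not the driver's code. */
    #include <stdio.h>

    #define MAX_VDBOX 4

    int main(void)
    {
            unsigned int disable_fuse = 0x5;        /* assumed: set bits mean "fused off" */
            unsigned int vdbox_mask = ~disable_fuse & ((1u << MAX_VDBOX) - 1);
            unsigned int sfc_mask = 0;
            unsigned int logical = 0, i;

            for (i = 0; i < MAX_VDBOX; i++) {
                    if (!(vdbox_mask & (1u << i)))
                            continue;               /* engine fused off, skip it */
                    if (logical++ % 2 == 0)         /* even logical VDBOXes get an SFC */
                            sfc_mask |= 1u << i;
            }

            printf("vdbox mask %#x, sfc access %#x\n", vdbox_mask, sfc_mask);
            return 0;
    }
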
index 8d62b85..fd23854 100644 (file)
@@ -82,6 +82,7 @@ enum intel_platform {
        /* gen12 */
        INTEL_TIGERLAKE,
        INTEL_ROCKETLAKE,
+       INTEL_DG1,
        INTEL_MAX_PLATFORMS
 };
 
@@ -122,6 +123,7 @@ enum intel_ppgtt_type {
        func(has_logical_ring_contexts); \
        func(has_logical_ring_elsq); \
        func(has_logical_ring_preemption); \
+       func(has_master_unit_irq); \
        func(has_pooled_eu); \
        func(has_rc6); \
        func(has_rc6p); \
@@ -157,7 +159,7 @@ struct intel_device_info {
 
        u8 gen;
        u8 gt; /* GT number, 0 if undefined */
-       intel_engine_mask_t engine_mask; /* Engines supported by the HW */
+       intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
 
        enum intel_platform platform;
 
@@ -219,18 +221,10 @@ struct intel_runtime_info {
        u8 num_sprites[I915_MAX_PIPES];
        u8 num_scalers[I915_MAX_PIPES];
 
-       u8 num_engines;
-
-       /* Slice/subslice/EU info */
-       struct sseu_dev_info sseu;
-
        u32 rawclk_freq;
 
        u32 cs_timestamp_frequency_hz;
        u32 cs_timestamp_period_ns;
-
-       /* Media engine access to SFC per instance */
-       u8 vdbox_sfc_access;
 };
 
 struct intel_driver_caps {
@@ -247,10 +241,6 @@ void intel_device_info_print_static(const struct intel_device_info *info,
                                    struct drm_printer *p);
 void intel_device_info_print_runtime(const struct intel_runtime_info *info,
                                     struct drm_printer *p);
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
-                                     struct drm_printer *p);
-
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
                             struct drm_printer *p);
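
The func(has_master_unit_irq) entry added above belongs to an X-macro list: a single list of flag names expands once into bitfield members and once into per-flag printing. A generic sketch of the technique, using invented flag names rather than the driver's actual list:

    /* Hypothetical X-macro sketch; flag names are made up. */
    #include <stdio.h>

    #define FOR_EACH_FLAG(func) \
            func(has_llc) \
            func(has_rc6) \
            func(has_fbc)

    struct flags {
    #define DEFINE_FLAG(name) unsigned int name:1;
            FOR_EACH_FLAG(DEFINE_FLAG)
    #undef DEFINE_FLAG
    };

    static void print_flags(const struct flags *f)
    {
    #define PRINT_FLAG(name) printf(#name ": %s\n", f->name ? "yes" : "no");
            FOR_EACH_FLAG(PRINT_FLAG)
    #undef PRINT_FLAG
    }

    int main(void)
    {
            struct flags f = { .has_rc6 = 1 };

            print_flags(&f);
            return 0;
    }
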
index c668e99..6c97192 100644 (file)
@@ -188,6 +188,12 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pch = NULL;
 
+       /* DG1 has the south display engine on the same PCI device */

+       if (IS_DG1(dev_priv)) {
+               dev_priv->pch_type = PCH_DG1;
+               return;
+       }
+
        /*
         * The reason to probe ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMM, that only
index 3053d1c..06d2cd5 100644 (file)
@@ -26,6 +26,9 @@ enum intel_pch {
        PCH_JSP,        /* Jasper Lake PCH */
        PCH_MCC,        /* Mule Creek Canyon PCH */
        PCH_TGP,        /* Tiger Lake PCH */
+
+       /* Fake PCHs, functionality handled on the same PCI dev */
+       PCH_DG1 = 1024,
 };
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff80
@@ -56,6 +59,7 @@ enum intel_pch {
 
 #define INTEL_PCH_TYPE(dev_priv)               ((dev_priv)->pch_type)
 #define INTEL_PCH_ID(dev_priv)                 ((dev_priv)->pch_id)
+#define HAS_PCH_DG1(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
 #define HAS_PCH_JSP(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
 #define HAS_PCH_MCC(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
 #define HAS_PCH_TGP(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
index 565a2b9..cfabbe0 100644 (file)
@@ -94,16 +94,13 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN8_CHICKEN_DCPR_1,
                   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
 
-       /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
-       /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
+       /*
+        * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
+        * Display WA #0859: skl,bxt,kbl,glk,cfl
+        */
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
-                  DISP_FBC_WM_DIS |
                   DISP_FBC_MEMORY_WAKE);
 
-       /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
-       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
-                  ILK_DPFC_DISABLE_DUMMY0);
-
        if (IS_SKYLAKE(dev_priv)) {
                /* WaDisableDopClockGating */
                I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
@@ -140,6 +137,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
         * application, using batch buffers or any other means.
         */
        I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+
+       /*
+        * WaFbcTurnOffFbcWatermark:bxt
+        * Display WA #0562: bxt
+        */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS);
+
+       /*
+        * WaFbcHighMemBwCorruptionAvoidance:bxt
+        * Display WA #0883: bxt
+        */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_DISABLE_DUMMY0);
 }
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7098,6 +7109,10 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 
 static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       /* Wa_1409120013:icl,ehl */
+       I915_WRITE(ILK_DPFC_CHICKEN,
+                  ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
        /* This is not an Wa. Enable to reduce Sampler power */
        I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
                   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
@@ -7112,9 +7127,13 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
        u32 vd_pg_enable = 0;
        unsigned int i;
 
+       /* Wa_1409120013:tgl */
+       I915_WRITE(ILK_DPFC_CHICKEN,
+                  ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
        /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
        for (i = 0; i < I915_MAX_VCS; i++) {
-               if (HAS_ENGINE(dev_priv, _VCS(i)))
+               if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
                        vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
                                        VDN_MFX_POWERGATE_ENABLE(i);
        }
@@ -7155,7 +7174,10 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN8_CHICKEN_DCPR_1,
                   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
 
-       /* WaFbcWakeMemOn:cnl */
+       /*
+        * WaFbcWakeMemOn:cnl
+        * Display WA #0859: cnl
+        */
        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                   DISP_FBC_MEMORY_WAKE);
 
@@ -7181,7 +7203,17 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
        cnp_init_clock_gating(dev_priv);
        gen9_init_clock_gating(dev_priv);
 
-       /* WaFbcNukeOnHostModify:cfl */
+       /*
+        * WaFbcTurnOffFbcWatermark:cfl
+        * Display WA #0562: cfl
+        */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS);
+
+       /*
+        * WaFbcNukeOnHostModify:cfl
+        * Display WA #0873: cfl
+        */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
@@ -7200,7 +7232,17 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
                I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
                           GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
 
-       /* WaFbcNukeOnHostModify:kbl */
+       /*
+        * WaFbcTurnOffFbcWatermark:kbl
+        * Display WA #0562: kbl
+        */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS);
+
+       /*
+        * WaFbcNukeOnHostModify:kbl
+        * Display WA #0873: kbl
+        */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
@@ -7213,15 +7255,37 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
        I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
                   FBC_LLC_FULLY_OPEN);
 
-       /* WaFbcNukeOnHostModify:skl */
+       /*
+        * WaFbcTurnOffFbcWatermark:skl
+        * Display WA #0562: skl
+        */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS);
+
+       /*
+        * WaFbcNukeOnHostModify:skl
+        * Display WA #0873: skl
+        */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+
+       /*
+        * WaFbcHighMemBwCorruptionAvoidance:skl
+        * Display WA #0883: skl
+        */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_DISABLE_DUMMY0);
 }
 
 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
        enum pipe pipe;
 
+       /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+       I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+                  I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+                  HSW_FBCQ_DIS);
+
        /* WaSwitchSolVfFArbitrationPriority:bdw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -7269,6 +7333,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+       I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+                  I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+                  HSW_FBCQ_DIS);
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -7286,6 +7355,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 
        I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
+       /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+       I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                  I915_READ(ILK_DISPLAY_CHICKEN1) |
+                  ILK_FBCQ_DIS);
+
        /* WaDisableBackToBackFlipFix:ivb */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
@@ -7471,6 +7545,16 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
 
        I915_WRITE(MEM_MODE,
                   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+
+       /*
+        * Have FBC ignore 3D activity since we use software
+        * render tracking, and otherwise a pure 3D workload
+        * (even if it just renders a single frame and then does
+        * absolutely nothing) would not allow FBC to recompress
+        * until a 2D blit occurs.
+        */
+       I915_WRITE(SCPD0,
+                  _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
 }
 
 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
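
The SCPD0 write above uses the masked-register convention common in this driver: the upper 16 bits of the written value select which of the lower 16 bits take effect, so a single write can set or clear a bit without a read-modify-write. A rough sketch of such helpers, assuming that 16/16 split (names are illustrative, not the driver's macros):

    /* Hypothetical helpers illustrating a masked-bit register value. */
    #include <stdio.h>

    static inline unsigned int masked_bit_enable(unsigned int bit)
    {
            return (bit << 16) | bit;       /* select the bit and write it as 1 */
    }

    static inline unsigned int masked_bit_disable(unsigned int bit)
    {
            return bit << 16;               /* select the bit and write it as 0 */
    }

    int main(void)
    {
            printf("enable bit0 -> %#x, disable bit0 -> %#x\n",
                   masked_bit_enable(1), masked_bit_disable(1));
            return 0;
    }
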
index 592364a..f5edee1 100644 (file)
@@ -142,7 +142,7 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
        if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
-               add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+               add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
        }
 }
 
@@ -219,7 +219,7 @@ fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
        if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
-               add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+               add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
        }
 }
 
@@ -1529,6 +1529,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
        (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
 
        if (INTEL_GEN(i915) >= 11) {
+               /* we'll prune the domains of missing engines later */
+               intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
                int i;
 
                uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
@@ -1541,7 +1543,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
                               FORCEWAKE_ACK_BLITTER_GEN9);
 
                for (i = 0; i < I915_MAX_VCS; i++) {
-                       if (!HAS_ENGINE(i915, _VCS(i)))
+                       if (!__HAS_ENGINE(emask, _VCS(i)))
                                continue;
 
                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
@@ -1549,7 +1551,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
                                       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
                }
                for (i = 0; i < I915_MAX_VECS; i++) {
-                       if (!HAS_ENGINE(i915, _VECS(i)))
+                       if (!__HAS_ENGINE(emask, _VECS(i)))
                                continue;
 
                        fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
@@ -1844,20 +1846,20 @@ out_mmio_cleanup:
  * the forcewake domains. Prune them, to make sure they only reference existing
  * engines.
  */
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+                                         struct intel_gt *gt)
 {
-       struct drm_i915_private *i915 = uncore->i915;
        enum forcewake_domains fw_domains = uncore->fw_domains;
        enum forcewake_domain_id domain_id;
        int i;
 
-       if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+       if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
                return;
 
        for (i = 0; i < I915_MAX_VCS; i++) {
                domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-               if (HAS_ENGINE(i915, _VCS(i)))
+               if (HAS_ENGINE(gt, _VCS(i)))
                        continue;
 
                if (fw_domains & BIT(domain_id))
@@ -1867,7 +1869,7 @@ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
        for (i = 0; i < I915_MAX_VECS; i++) {
                domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-               if (HAS_ENGINE(i915, _VECS(i)))
+               if (HAS_ENGINE(gt, _VECS(i)))
                        continue;
 
                if (fw_domains & BIT(domain_id))
index 8d3aa8b..c4b22d9 100644 (file)
@@ -35,6 +35,7 @@
 struct drm_i915_private;
 struct intel_runtime_pm;
 struct intel_uncore;
+struct intel_gt;
 
 struct intel_uncore_mmio_debug {
        spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -186,7 +187,8 @@ intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
 void intel_uncore_init_early(struct intel_uncore *uncore,
                             struct drm_i915_private *i915);
 int intel_uncore_init_mmio(struct intel_uncore *uncore);
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+                                         struct intel_gt *gt);
 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
 void intel_uncore_fini_mmio(struct intel_uncore *uncore);
index be54570..c2d001d 100644 (file)
@@ -280,11 +280,144 @@ out:
        return err;
 }
 
+static int live_noa_gpr(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_perf_stream *stream;
+       struct intel_context *ce;
+       struct i915_request *rq;
+       u32 *cs, *store;
+       void *scratch;
+       u32 gpr0;
+       int err;
+       int i;
+
+       /* Check that the delay does not clobber user context state (GPR) */
+
+       stream = test_stream(&i915->perf);
+       if (!stream)
+               return -ENOMEM;
+
+       gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0));
+
+       ce = intel_context_create(stream->engine);
+       if (IS_ERR(ce)) {
+               err = PTR_ERR(ce);
+               goto out;
+       }
+
+       /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
+       scratch = kmap(ce->vm->scratch[0].base.page);
+       memset(scratch, POISON_FREE, PAGE_SIZE);
+
+       rq = intel_context_create_request(ce);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto out_ce;
+       }
+       i915_request_get(rq);
+
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err) {
+                       i915_request_add(rq);
+                       goto out_rq;
+               }
+       }
+
+       /* Fill the 16 qword [32 dword] GPR with a known unlikely value */
+       cs = intel_ring_begin(rq, 2 * 32 + 2);
+       if (IS_ERR(cs)) {
+               err = PTR_ERR(cs);
+               i915_request_add(rq);
+               goto out_rq;
+       }
+
+       *cs++ = MI_LOAD_REGISTER_IMM(32);
+       for (i = 0; i < 32; i++) {
+               *cs++ = gpr0 + i * sizeof(u32);
+               *cs++ = STACK_MAGIC;
+       }
+       *cs++ = MI_NOOP;
+       intel_ring_advance(rq, cs);
+
+       /* Execute the GPU delay */
+       err = rq->engine->emit_bb_start(rq,
+                                       i915_ggtt_offset(stream->noa_wait), 0,
+                                       I915_DISPATCH_SECURE);
+       if (err) {
+               i915_request_add(rq);
+               goto out_rq;
+       }
+
+       /* Read the GPR back, using the pinned global HWSP for convenience */
+       store = memset32(rq->engine->status_page.addr + 512, 0, 32);
+       for (i = 0; i < 32; i++) {
+               u32 cmd;
+
+               cs = intel_ring_begin(rq, 4);
+               if (IS_ERR(cs)) {
+                       err = PTR_ERR(cs);
+                       i915_request_add(rq);
+                       goto out_rq;
+               }
+
+               cmd = MI_STORE_REGISTER_MEM;
+               if (INTEL_GEN(i915) >= 8)
+                       cmd++;
+               cmd |= MI_USE_GGTT;
+
+               *cs++ = cmd;
+               *cs++ = gpr0 + i * sizeof(u32);
+               *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
+                       offset_in_page(store) +
+                       i * sizeof(u32);
+               *cs++ = 0;
+               intel_ring_advance(rq, cs);
+       }
+
+       i915_request_add(rq);
+
+       if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
+               pr_err("noa_wait timed out\n");
+               intel_gt_set_wedged(stream->engine->gt);
+               err = -EIO;
+               goto out_rq;
+       }
+
+       /* Verify that the GPRs contain our expected values */
+       for (i = 0; i < 32; i++) {
+               if (store[i] == STACK_MAGIC)
+                       continue;
+
+               pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n",
+                      i, store[i], STACK_MAGIC);
+               err = -EINVAL;
+       }
+
+       /* Verify that the user's scratch page was not used for GPR storage */
+       if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
+               pr_err("Scratch page overwritten!\n");
+               igt_hexdump(scratch, 4096);
+               err = -EINVAL;
+       }
+
+out_rq:
+       i915_request_put(rq);
+out_ce:
+       kunmap(ce->vm->scratch[0].base.page);
+       intel_context_put(ce);
+out:
+       stream_destroy(stream);
+       return err;
+}
+
 int i915_perf_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_noa_delay),
+               SUBTEST(live_noa_gpr),
        };
        struct i915_perf *perf = &i915->perf;
        int err;
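
The live_noa_gpr test added above leans on a poison-and-check idiom: fill scratch memory with a known byte before the workload, then verify afterwards that no byte changed. A stand-alone sketch of the idiom; the 0x6b value mirrors the kernel's POISON_FREE, everything else here is illustrative:

    /* Hypothetical sketch of the poison-and-check trick used by the selftest. */
    #include <stdio.h>
    #include <string.h>

    #define POISON 0x6b     /* same value as the kernel's POISON_FREE */

    static int page_untouched(const unsigned char *page, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if (page[i] != POISON)
                            return 0;       /* something scribbled here */
            return 1;
    }

    int main(void)
    {
            unsigned char page[4096];

            memset(page, POISON, sizeof(page));
            /* ... run the operation that must not touch the page ... */
            printf("scratch %s\n",
                   page_untouched(page, sizeof(page)) ? "untouched" : "overwritten");
            return 0;
    }
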
index 9271aad..57dd6f5 100644 (file)
@@ -1454,7 +1454,7 @@ out_flush:
                idx++;
        }
        pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
-               num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
+               num_waits, num_fences, idx, ncpus);
 
        ret = igt_live_test_end(&live) ?: ret;
 out_contexts:
index 9b105b8..9a46be0 100644 (file)
@@ -190,7 +190,8 @@ struct drm_i915_private *mock_gem_device(void)
        mock_init_ggtt(i915, &i915->ggtt);
        i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
-       mkwrite_device_info(i915)->engine_mask = BIT(0);
+       mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+       i915->gt.info.engine_mask = BIT(0);
 
        i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
        if (!i915->gt.engine[RCS0])
index edc5e3d..b173086 100644 (file)
@@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm,
 {
 }
 
-static int mock_bind_ppgtt(struct i915_vma *vma,
+static int mock_bind_ppgtt(struct i915_address_space *vm,
+                          struct i915_vma *vma,
                           enum i915_cache_level cache_level,
                           u32 flags)
 {
@@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
        return 0;
 }
 
-static void mock_unbind_ppgtt(struct i915_vma *vma)
+static void mock_unbind_ppgtt(struct i915_address_space *vm,
+                             struct i915_vma *vma)
 {
 }
 
@@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
        return ppgtt;
 }
 
-static int mock_bind_ggtt(struct i915_vma *vma,
+static int mock_bind_ggtt(struct i915_address_space *vm,
+                         struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 flags)
 {
@@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma,
        return 0;
 }
 
-static void mock_unbind_ggtt(struct i915_vma *vma)
+static void mock_unbind_ggtt(struct i915_address_space *vm,
+                            struct i915_vma *vma)
 {
 }
 
index 04e1d38..08802e5 100644 (file)
@@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_plane *plane = &pipe->plane;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        const struct drm_display_mode *mode = &cstate->mode;
        struct drm_framebuffer *fb = plane->state->fb;
        u32 format = fb->format->format;
index d300be5..82137ab 100644 (file)
@@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm)
 
        drm_mode_config_reset(drm);
        drm_kms_helper_poll_init(drm);
-       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 }
@@ -254,6 +253,8 @@ static int mcde_drm_bind(struct device *dev)
        if (ret < 0)
                goto unbind;
 
+       drm_fbdev_generic_setup(drm, 32);
+
        return 0;
 
 unbind:
index c420f5a..aa74aac 100644 (file)
@@ -6,12 +6,12 @@ config DRM_MEDIATEK
        depends on COMMON_CLK
        depends on HAVE_ARM_SMCCC
        depends on OF
+       depends on MTK_MMSYS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
        select MEMORY
-       select MTK_MMSYS
        select MTK_SMI
        select VIDEOMODE_HELPERS
        help
index f6adc5b..040834b 100644 (file)
@@ -189,7 +189,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
                if (ret) {
@@ -209,7 +208,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 {
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
                clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
@@ -254,7 +252,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        if (WARN_ON(!crtc->state))
                return -EINVAL;
 
@@ -295,7 +292,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
                goto err_mutex_unprepare;
        }
 
-       DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
        for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
                mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
                                      mtk_crtc->ddp_comp[i]->id,
@@ -345,7 +341,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
        struct drm_crtc *crtc = &mtk_crtc->base;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
                if (i == 1)
@@ -827,7 +822,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
        mtk_crtc->cmdq_client =
-                       cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                                        drm_crtc_index(&mtk_crtc->base),
                                         2000);
        if (IS_ERR(mtk_crtc->cmdq_client)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
index 6bd3694..040a8f3 100644 (file)
@@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
        if (!private)
                return -ENOMEM;
 
-       private->data = of_device_get_match_data(dev);
        private->mmsys_dev = dev->parent;
        if (!private->mmsys_dev) {
                dev_err(dev, "Failed to get MMSYS device\n");
@@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
                                goto err_node;
                        }
 
-                       ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+                       ret = mtk_ddp_comp_init(dev->parent, node, comp,
+                                               comp_id, NULL);
                        if (ret) {
                                of_node_put(node);
                                goto err_node;
@@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_suspend(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
 
        return ret;
 }
@@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_resume(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
 
        return ret;
 }
index c2bd683..92141a1 100644 (file)
@@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+       state->pending.enable = false;
+       wmb(); /* Make sure the above parameter is set before update */
+       state->pending.dirty = true;
+}
+
 static void mtk_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
 {
@@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        if (!crtc || WARN_ON(!fb))
                return;
 
+       if (!plane->state->visible) {
+               mtk_plane_atomic_disable(plane, old_state);
+               return;
+       }
+
        gem = fb->obj[0];
        mtk_gem = to_mtk_gem_obj(gem);
        addr = mtk_gem->dma_addr;
@@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        state->pending.dirty = true;
 }
 
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
-                                    struct drm_plane_state *old_state)
-{
-       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
-       state->pending.enable = false;
-       wmb(); /* Make sure the above parameter is set before update */
-       state->pending.dirty = true;
-}
-
 static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
        .prepare_fb = drm_gem_fb_prepare_fb,
        .atomic_check = mtk_plane_atomic_check,
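
The pending-state handoff above (enable written first, wmb(), then dirty set) is the usual publish/consume ordering: the reader must not observe dirty before the data it guards. A minimal user-space sketch of the pairing, using compiler/CPU fences as stand-ins for the kernel's wmb()/rmb(); the struct and function names are invented:

    /* Hypothetical sketch of the ordering the wmb() above provides. */
    #include <stdbool.h>
    #include <stdio.h>

    struct pending_state {
            bool enable;
            bool dirty;
    };

    /* producer: publish the new value, then raise the dirty flag */
    static void publish(struct pending_state *p, bool enable)
    {
            p->enable = enable;
            __atomic_thread_fence(__ATOMIC_RELEASE);        /* plays the role of wmb() */
            p->dirty = true;
    }

    /* consumer: only trust enable after having observed dirty */
    static bool consume(const struct pending_state *p)
    {
            if (!p->dirty)
                    return false;
            __atomic_thread_fence(__ATOMIC_ACQUIRE);        /* plays the role of rmb() */
            return p->enable;
    }

    int main(void)
    {
            struct pending_state s = { 0 };

            publish(&s, true);
            printf("consumed enable=%d\n", consume(&s));
            return 0;
    }
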
index 270bf22..16fd99d 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
@@ -181,9 +182,9 @@ struct mtk_dsi {
        struct device *dev;
        struct mipi_dsi_host host;
        struct drm_encoder encoder;
-       struct drm_connector conn;
-       struct drm_panel *panel;
-       struct drm_bridge *bridge;
+       struct drm_bridge bridge;
+       struct drm_bridge *next_bridge;
+       struct drm_connector *connector;
        struct phy *phy;
 
        void __iomem *regs;
@@ -206,14 +207,9 @@ struct mtk_dsi {
        const struct mtk_dsi_driver_data *driver_data;
 };
 
-static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
-{
-       return container_of(e, struct mtk_dsi, encoder);
-}
-
-static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
+static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
 {
-       return container_of(c, struct mtk_dsi, conn);
+       return container_of(b, struct mtk_dsi, bridge);
 }
 
 static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
@@ -316,10 +312,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
 
 static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
 {
-       u32 tmp_reg1;
-
-       tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
-       return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+       return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
 }
 
 static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
@@ -681,16 +674,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
        mtk_dsi_lane0_ulp_mode_leave(dsi);
        mtk_dsi_clk_hs_mode(dsi, 0);
 
-       if (dsi->panel) {
-               if (drm_panel_prepare(dsi->panel)) {
-                       DRM_ERROR("failed to prepare the panel\n");
-                       goto err_disable_digital_clk;
-               }
-       }
-
        return 0;
-err_disable_digital_clk:
-       clk_disable_unprepare(dsi->digital_clk);
 err_disable_engine_clk:
        clk_disable_unprepare(dsi->engine_clk);
 err_phy_power_off:
@@ -717,15 +701,7 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
         */
        mtk_dsi_stop(dsi);
 
-       if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
-               if (dsi->panel) {
-                       if (drm_panel_unprepare(dsi->panel)) {
-                               DRM_ERROR("failed to unprepare the panel\n");
-                               return;
-                       }
-               }
-       }
-
+       mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
        mtk_dsi_reset_engine(dsi);
        mtk_dsi_lane0_ulp_mode_enter(dsi);
        mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -756,19 +732,7 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
 
        mtk_dsi_start(dsi);
 
-       if (dsi->panel) {
-               if (drm_panel_enable(dsi->panel)) {
-                       DRM_ERROR("failed to enable the panel\n");
-                       goto err_dsi_power_off;
-               }
-       }
-
        dsi->enabled = true;
-
-       return;
-err_dsi_power_off:
-       mtk_dsi_stop(dsi);
-       mtk_dsi_poweroff(dsi);
 }
 
 static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
@@ -776,155 +740,51 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
        if (!dsi->enabled)
                return;
 
-       if (dsi->panel) {
-               if (drm_panel_disable(dsi->panel)) {
-                       DRM_ERROR("failed to disable the panel\n");
-                       return;
-               }
-       }
-
        mtk_dsi_poweroff(dsi);
 
        dsi->enabled = false;
 }
 
-static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
-                                      const struct drm_display_mode *mode,
-                                      struct drm_display_mode *adjusted_mode)
+static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
 {
-       return true;
+       struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+
+       /* Attach the panel or bridge to the dsi bridge */
+       return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+                                &dsi->bridge, flags);
 }
 
-static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode,
-                                    struct drm_display_mode *adjusted)
+static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_display_mode *adjusted)
 {
-       struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+       struct mtk_dsi *dsi = bridge_to_dsi(bridge);
 
        drm_display_mode_to_videomode(adjusted, &dsi->vm);
 }
 
-static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
+static void mtk_dsi_bridge_disable(struct drm_bridge *bridge)
 {
-       struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+       struct mtk_dsi *dsi = bridge_to_dsi(bridge);
 
        mtk_output_dsi_disable(dsi);
 }
 
-static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
+static void mtk_dsi_bridge_enable(struct drm_bridge *bridge)
 {
-       struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+       struct mtk_dsi *dsi = bridge_to_dsi(bridge);
 
        mtk_output_dsi_enable(dsi);
 }
 
-static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
-{
-       struct mtk_dsi *dsi = connector_to_dsi(connector);
-
-       return drm_panel_get_modes(dsi->panel, connector);
-}
-
-static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
-       .mode_fixup = mtk_dsi_encoder_mode_fixup,
-       .mode_set = mtk_dsi_encoder_mode_set,
-       .disable = mtk_dsi_encoder_disable,
-       .enable = mtk_dsi_encoder_enable,
-};
-
-static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = drm_connector_cleanup,
-       .reset = drm_atomic_helper_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
+       .attach = mtk_dsi_bridge_attach,
+       .disable = mtk_dsi_bridge_disable,
+       .enable = mtk_dsi_bridge_enable,
+       .mode_set = mtk_dsi_bridge_mode_set,
 };
 
-static const struct drm_connector_helper_funcs
-       mtk_dsi_connector_helper_funcs = {
-       .get_modes = mtk_dsi_connector_get_modes,
-};
-
-static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
-{
-       int ret;
-
-       ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
-                                DRM_MODE_CONNECTOR_DSI);
-       if (ret) {
-               DRM_ERROR("Failed to connector init to drm\n");
-               return ret;
-       }
-
-       drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
-
-       dsi->conn.dpms = DRM_MODE_DPMS_OFF;
-       drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
-
-       if (dsi->panel) {
-               ret = drm_panel_attach(dsi->panel, &dsi->conn);
-               if (ret) {
-                       DRM_ERROR("Failed to attach panel to drm\n");
-                       goto err_connector_cleanup;
-               }
-       }
-
-       return 0;
-
-err_connector_cleanup:
-       drm_connector_cleanup(&dsi->conn);
-       return ret;
-}
-
-static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
-{
-       int ret;
-
-       ret = drm_simple_encoder_init(drm, &dsi->encoder,
-                                     DRM_MODE_ENCODER_DSI);
-       if (ret) {
-               DRM_ERROR("Failed to encoder init to drm\n");
-               return ret;
-       }
-       drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);
-
-       /*
-        * Currently display data paths are statically assigned to a crtc each.
-        * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
-        */
-       dsi->encoder.possible_crtcs = 1;
-
-       /* If there's a bridge, attach to it and let it create the connector */
-       if (dsi->bridge) {
-               ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
-               if (ret) {
-                       DRM_ERROR("Failed to attach bridge to drm\n");
-                       goto err_encoder_cleanup;
-               }
-       } else {
-               /* Otherwise create our own connector and attach to a panel */
-               ret = mtk_dsi_create_connector(drm, dsi);
-               if (ret)
-                       goto err_encoder_cleanup;
-       }
-
-       return 0;
-
-err_encoder_cleanup:
-       drm_encoder_cleanup(&dsi->encoder);
-       return ret;
-}
-
-static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
-{
-       drm_encoder_cleanup(&dsi->encoder);
-       /* Skip connector cleanup if creation was delegated to the bridge */
-       if (dsi->conn.dev)
-               drm_connector_cleanup(&dsi->conn);
-       if (dsi->panel)
-               drm_panel_detach(dsi->panel);
-}
-
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
 {
        struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
@@ -953,20 +813,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;
 
-       if (dsi->conn.dev)
-               drm_helper_hpd_irq_event(dsi->conn.dev);
-
-       return 0;
-}
-
-static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
-                              struct mipi_dsi_device *device)
-{
-       struct mtk_dsi *dsi = host_to_dsi(host);
-
-       if (dsi->conn.dev)
-               drm_helper_hpd_irq_event(dsi->conn.dev);
-
        return 0;
 }
 
@@ -1110,10 +956,46 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
 
 static const struct mipi_dsi_host_ops mtk_dsi_ops = {
        .attach = mtk_dsi_host_attach,
-       .detach = mtk_dsi_host_detach,
        .transfer = mtk_dsi_host_transfer,
 };
 
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+       int ret;
+
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
+       if (ret) {
+               DRM_ERROR("Failed to encoder init to drm\n");
+               return ret;
+       }
+
+       /*
+        * Currently display data paths are statically assigned to a crtc each.
+        * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
+        */
+       dsi->encoder.possible_crtcs = 1;
+
+       ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret)
+               goto err_cleanup_encoder;
+
+       dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+       if (IS_ERR(dsi->connector)) {
+               DRM_ERROR("Unable to create bridge connector\n");
+               ret = PTR_ERR(dsi->connector);
+               goto err_cleanup_encoder;
+       }
+       drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+       return 0;
+
+err_cleanup_encoder:
+       drm_encoder_cleanup(&dsi->encoder);
+       return ret;
+}
+
 static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
 {
        int ret;
@@ -1127,11 +1009,9 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       ret = mtk_dsi_create_conn_enc(drm, dsi);
-       if (ret) {
-               DRM_ERROR("Encoder create failed with %d\n", ret);
+       ret = mtk_dsi_encoder_init(drm, dsi);
+       if (ret)
                goto err_unregister;
-       }
 
        return 0;
 
@@ -1146,7 +1026,7 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
        struct drm_device *drm = data;
        struct mtk_dsi *dsi = dev_get_drvdata(dev);
 
-       mtk_dsi_destroy_conn_enc(dsi);
+       drm_encoder_cleanup(&dsi->encoder);
        mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
 }
 
@@ -1159,6 +1039,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
 {
        struct mtk_dsi *dsi;
        struct device *dev = &pdev->dev;
+       struct drm_panel *panel;
        struct resource *regs;
        int irq_num;
        int comp_id;
@@ -1177,10 +1058,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
        }
 
        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
-                                         &dsi->panel, &dsi->bridge);
+                                         &panel, &dsi->next_bridge);
        if (ret)
                goto err_unregister_host;
 
+       if (panel) {
+               dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+               if (IS_ERR(dsi->next_bridge)) {
+                       ret = PTR_ERR(dsi->next_bridge);
+                       goto err_unregister_host;
+               }
+       }
+
        dsi->driver_data = of_device_get_match_data(dev);
 
        dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1256,6 +1145,12 @@ static int mtk_dsi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dsi);
 
+       dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
+       dsi->bridge.of_node = dev->of_node;
+       dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+       drm_bridge_add(&dsi->bridge);
+
        ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
        if (ret) {
                dev_err(&pdev->dev, "failed to add component: %d\n", ret);
@@ -1274,6 +1169,7 @@ static int mtk_dsi_remove(struct platform_device *pdev)
        struct mtk_dsi *dsi = platform_get_drvdata(pdev);
 
        mtk_output_dsi_disable(dsi);
+       drm_bridge_remove(&dsi->bridge);
        component_del(&pdev->dev, &mtk_dsi_component_ops);
        mipi_dsi_host_unregister(&dsi->host);
 
index 55a4d09..9559126 100644
@@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_enable(hdmi);
 
        return 0;
@@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_disable(hdmi);
 }
 
@@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
        if (enable)
                mtk_hdmi_hw_aud_mute(hdmi);
        else
@@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
 
        return 0;
@@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
                goto err_bridge_remove;
        }
 
-       dev_dbg(dev, "mediatek hdmi probe success\n");
        return 0;
 
 err_bridge_remove:
@@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev)
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
        mtk_hdmi_clk_disable_audio(hdmi);
-       dev_dbg(dev, "hdmi suspend success!\n");
+
        return 0;
 }
 
@@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev)
                return ret;
        }
 
-       dev_dbg(dev, "hdmi resume success!\n");
        return 0;
 }
 #endif
index b55f516..827b937 100644
 #define RGS_HDMITX_5T1_EDG             (0xf << 4)
 #define RGS_HDMITX_PLUG_TST            BIT(0)
 
-static const u8 PREDIV[3][4] = {
-       {0x0, 0x0, 0x0, 0x0},   /* 27Mhz */
-       {0x1, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x1, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
-       {0x3, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x0, 0x0}    /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
-       {0x1, 0x1, 0x1, 0x1},   /* 27Mhz */
-       {0x1, 0x0, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
-       {19, 24, 29, 19},       /* 27Mhz */
-       {19, 24, 14, 19},       /* 74Mhz */
-       {19, 24, 14, 19}        /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
-       {0x2, 0x1, 0x1, 0x2},   /* 27Mhz */
-       {0x2, 0x2, 0x2, 0x2},   /* 74Mhz */
-       {0x2, 0x2, 0x2, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
-       {0xc, 0xc, 0x8, 0xc},   /* 27Mhz */
-       {0xc, 0xf, 0xf, 0xc},   /* 74Mhz */
-       {0xc, 0xf, 0xf, 0xc}    /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
-       {0x2, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x3, 0x3, 0x2},   /* 74Mhz */
-       {0x2, 0x3, 0x3, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
-       {0x1, 0x1, 0x0, 0x1},   /* 27Mhz */
-       {0x1, 0x2, 0x2, 0x1},   /* 74Mhz */
-       {0x1, 0x2, 0x2, 0x1}    /* 148Mhz */
-};
-
 static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
@@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
        usleep_range(100, 150);
index 08631fd..446e796 100644
 #define VIU_OSD_FIFO_DEPTH_VAL(val)      ((val & 0x7f) << 12)
 #define VIU_OSD_WORDS_PER_BURST(words)   (((words & 0x4) >> 1) << 22)
 #define VIU_OSD_FIFO_LIMITS(size)        ((size & 0xf) << 24)
+#define VIU_OSD_BURST_LENGTH_24          (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32          (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48          (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64          (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96          (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128         (0x1 << 31 | 0x1 << 10)
 
 #define VD1_IF0_GEN_REG 0x1a50
 #define VD1_IF0_CANVAS0 0x1a51
index 304f8ff..aede0c6 100644
@@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
                            priv->io_base + _REG(VIU_MISC_CTRL1));
 }
 
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
-       uint32_t val = (((length & 0x80) % 24) / 12);
-
-       return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
 void meson_viu_init(struct meson_drm *priv)
 {
        uint32_t reg;
@@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv)
                VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
 
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
-               reg |= meson_viu_osd_burst_length_reg(32);
+               reg |= VIU_OSD_BURST_LENGTH_32;
        else
-               reg |= meson_viu_osd_burst_length_reg(64);
+               reg |= VIU_OSD_BURST_LENGTH_64;
 
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
index 14eb52f..54e1b2a 100644
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -210,6 +212,854 @@ enum a2xx_rb_blend_opcode {
        BLEND2_DST_PLUS_SRC_BIAS = 5,
 };
 
+enum a2xx_su_perfcnt_select {
+       PERF_PAPC_PASX_REQ = 0,
+       PERF_PAPC_PASX_FIRST_VECTOR = 2,
+       PERF_PAPC_PASX_SECOND_VECTOR = 3,
+       PERF_PAPC_PASX_FIRST_DEAD = 4,
+       PERF_PAPC_PASX_SECOND_DEAD = 5,
+       PERF_PAPC_PASX_VTX_KILL_DISCARD = 6,
+       PERF_PAPC_PASX_VTX_NAN_DISCARD = 7,
+       PERF_PAPC_PA_INPUT_PRIM = 8,
+       PERF_PAPC_PA_INPUT_NULL_PRIM = 9,
+       PERF_PAPC_PA_INPUT_EVENT_FLAG = 10,
+       PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11,
+       PERF_PAPC_PA_INPUT_END_OF_PACKET = 12,
+       PERF_PAPC_CLPR_CULL_PRIM = 13,
+       PERF_PAPC_CLPR_VV_CULL_PRIM = 15,
+       PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17,
+       PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18,
+       PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19,
+       PERF_PAPC_CLPR_VV_CLIP_PRIM = 21,
+       PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28,
+       PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29,
+       PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30,
+       PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31,
+       PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32,
+       PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33,
+       PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34,
+       PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35,
+       PERF_PAPC_CLSM_NULL_PRIM = 36,
+       PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37,
+       PERF_PAPC_CLSM_CLIP_PRIM = 38,
+       PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44,
+       PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45,
+       PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46,
+       PERF_PAPC_SU_INPUT_PRIM = 47,
+       PERF_PAPC_SU_INPUT_CLIP_PRIM = 48,
+       PERF_PAPC_SU_INPUT_NULL_PRIM = 49,
+       PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50,
+       PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51,
+       PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52,
+       PERF_PAPC_SU_POLYMODE_FACE_CULL = 53,
+       PERF_PAPC_SU_POLYMODE_BACK_CULL = 54,
+       PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55,
+       PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56,
+       PERF_PAPC_SU_OUTPUT_PRIM = 57,
+       PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58,
+       PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59,
+       PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60,
+       PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61,
+       PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62,
+       PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63,
+       PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64,
+       PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65,
+       PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66,
+       PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67,
+       PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68,
+       PERF_PAPC_PASX_REQ_IDLE = 69,
+       PERF_PAPC_PASX_REQ_BUSY = 70,
+       PERF_PAPC_PASX_REQ_STALLED = 71,
+       PERF_PAPC_PASX_REC_IDLE = 72,
+       PERF_PAPC_PASX_REC_BUSY = 73,
+       PERF_PAPC_PASX_REC_STARVED_SX = 74,
+       PERF_PAPC_PASX_REC_STALLED = 75,
+       PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76,
+       PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77,
+       PERF_PAPC_CCGSM_IDLE = 78,
+       PERF_PAPC_CCGSM_BUSY = 79,
+       PERF_PAPC_CCGSM_STALLED = 80,
+       PERF_PAPC_CLPRIM_IDLE = 81,
+       PERF_PAPC_CLPRIM_BUSY = 82,
+       PERF_PAPC_CLPRIM_STALLED = 83,
+       PERF_PAPC_CLPRIM_STARVED_CCGSM = 84,
+       PERF_PAPC_CLIPSM_IDLE = 85,
+       PERF_PAPC_CLIPSM_BUSY = 86,
+       PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87,
+       PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88,
+       PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89,
+       PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90,
+       PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91,
+       PERF_PAPC_CLIPGA_IDLE = 92,
+       PERF_PAPC_CLIPGA_BUSY = 93,
+       PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94,
+       PERF_PAPC_CLIPGA_STALLED = 95,
+       PERF_PAPC_CLIP_IDLE = 96,
+       PERF_PAPC_CLIP_BUSY = 97,
+       PERF_PAPC_SU_IDLE = 98,
+       PERF_PAPC_SU_BUSY = 99,
+       PERF_PAPC_SU_STARVED_CLIP = 100,
+       PERF_PAPC_SU_STALLED_SC = 101,
+       PERF_PAPC_SU_FACENESS_CULL = 102,
+};
+
+enum a2xx_sc_perfcnt_select {
+       SC_SR_WINDOW_VALID = 0,
+       SC_CW_WINDOW_VALID = 1,
+       SC_QM_WINDOW_VALID = 2,
+       SC_FW_WINDOW_VALID = 3,
+       SC_EZ_WINDOW_VALID = 4,
+       SC_IT_WINDOW_VALID = 5,
+       SC_STARVED_BY_PA = 6,
+       SC_STALLED_BY_RB_TILE = 7,
+       SC_STALLED_BY_RB_SAMP = 8,
+       SC_STARVED_BY_RB_EZ = 9,
+       SC_STALLED_BY_SAMPLE_FF = 10,
+       SC_STALLED_BY_SQ = 11,
+       SC_STALLED_BY_SP = 12,
+       SC_TOTAL_NO_PRIMS = 13,
+       SC_NON_EMPTY_PRIMS = 14,
+       SC_NO_TILES_PASSING_QM = 15,
+       SC_NO_PIXELS_PRE_EZ = 16,
+       SC_NO_PIXELS_POST_EZ = 17,
+};
+
+enum a2xx_vgt_perfcount_select {
+       VGT_SQ_EVENT_WINDOW_ACTIVE = 0,
+       VGT_SQ_SEND = 1,
+       VGT_SQ_STALLED = 2,
+       VGT_SQ_STARVED_BUSY = 3,
+       VGT_SQ_STARVED_IDLE = 4,
+       VGT_SQ_STATIC = 5,
+       VGT_PA_EVENT_WINDOW_ACTIVE = 6,
+       VGT_PA_CLIP_V_SEND = 7,
+       VGT_PA_CLIP_V_STALLED = 8,
+       VGT_PA_CLIP_V_STARVED_BUSY = 9,
+       VGT_PA_CLIP_V_STARVED_IDLE = 10,
+       VGT_PA_CLIP_V_STATIC = 11,
+       VGT_PA_CLIP_P_SEND = 12,
+       VGT_PA_CLIP_P_STALLED = 13,
+       VGT_PA_CLIP_P_STARVED_BUSY = 14,
+       VGT_PA_CLIP_P_STARVED_IDLE = 15,
+       VGT_PA_CLIP_P_STATIC = 16,
+       VGT_PA_CLIP_S_SEND = 17,
+       VGT_PA_CLIP_S_STALLED = 18,
+       VGT_PA_CLIP_S_STARVED_BUSY = 19,
+       VGT_PA_CLIP_S_STARVED_IDLE = 20,
+       VGT_PA_CLIP_S_STATIC = 21,
+       RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22,
+       RBIU_IMMED_DATA_FIFO_STARVED = 23,
+       RBIU_IMMED_DATA_FIFO_STALLED = 24,
+       RBIU_DMA_REQUEST_FIFO_STARVED = 25,
+       RBIU_DMA_REQUEST_FIFO_STALLED = 26,
+       RBIU_DRAW_INITIATOR_FIFO_STARVED = 27,
+       RBIU_DRAW_INITIATOR_FIFO_STALLED = 28,
+       BIN_PRIM_NEAR_CULL = 29,
+       BIN_PRIM_ZERO_CULL = 30,
+       BIN_PRIM_FAR_CULL = 31,
+       BIN_PRIM_BIN_CULL = 32,
+       BIN_PRIM_FACE_CULL = 33,
+       SPARE34 = 34,
+       SPARE35 = 35,
+       SPARE36 = 36,
+       SPARE37 = 37,
+       SPARE38 = 38,
+       SPARE39 = 39,
+       TE_SU_IN_VALID = 40,
+       TE_SU_IN_READ = 41,
+       TE_SU_IN_PRIM = 42,
+       TE_SU_IN_EOP = 43,
+       TE_SU_IN_NULL_PRIM = 44,
+       TE_WK_IN_VALID = 45,
+       TE_WK_IN_READ = 46,
+       TE_OUT_PRIM_VALID = 47,
+       TE_OUT_PRIM_READ = 48,
+};
+
+enum a2xx_tcr_perfcount_select {
+       DGMMPD_IPMUX0_STALL = 0,
+       DGMMPD_IPMUX_ALL_STALL = 4,
+       OPMUX0_L2_WRITES = 5,
+};
+
+enum a2xx_tp_perfcount_select {
+       POINT_QUADS = 0,
+       BILIN_QUADS = 1,
+       ANISO_QUADS = 2,
+       MIP_QUADS = 3,
+       VOL_QUADS = 4,
+       MIP_VOL_QUADS = 5,
+       MIP_ANISO_QUADS = 6,
+       VOL_ANISO_QUADS = 7,
+       ANISO_2_1_QUADS = 8,
+       ANISO_4_1_QUADS = 9,
+       ANISO_6_1_QUADS = 10,
+       ANISO_8_1_QUADS = 11,
+       ANISO_10_1_QUADS = 12,
+       ANISO_12_1_QUADS = 13,
+       ANISO_14_1_QUADS = 14,
+       ANISO_16_1_QUADS = 15,
+       MIP_VOL_ANISO_QUADS = 16,
+       ALIGN_2_QUADS = 17,
+       ALIGN_4_QUADS = 18,
+       PIX_0_QUAD = 19,
+       PIX_1_QUAD = 20,
+       PIX_2_QUAD = 21,
+       PIX_3_QUAD = 22,
+       PIX_4_QUAD = 23,
+       TP_MIPMAP_LOD0 = 24,
+       TP_MIPMAP_LOD1 = 25,
+       TP_MIPMAP_LOD2 = 26,
+       TP_MIPMAP_LOD3 = 27,
+       TP_MIPMAP_LOD4 = 28,
+       TP_MIPMAP_LOD5 = 29,
+       TP_MIPMAP_LOD6 = 30,
+       TP_MIPMAP_LOD7 = 31,
+       TP_MIPMAP_LOD8 = 32,
+       TP_MIPMAP_LOD9 = 33,
+       TP_MIPMAP_LOD10 = 34,
+       TP_MIPMAP_LOD11 = 35,
+       TP_MIPMAP_LOD12 = 36,
+       TP_MIPMAP_LOD13 = 37,
+       TP_MIPMAP_LOD14 = 38,
+};
+
+enum a2xx_tcm_perfcount_select {
+       QUAD0_RD_LAT_FIFO_EMPTY = 0,
+       QUAD0_RD_LAT_FIFO_4TH_FULL = 3,
+       QUAD0_RD_LAT_FIFO_HALF_FULL = 4,
+       QUAD0_RD_LAT_FIFO_FULL = 5,
+       QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6,
+       READ_STARVED_QUAD0 = 28,
+       READ_STARVED = 32,
+       READ_STALLED_QUAD0 = 33,
+       READ_STALLED = 37,
+       VALID_READ_QUAD0 = 38,
+       TC_TP_STARVED_QUAD0 = 42,
+       TC_TP_STARVED = 46,
+};
+
+enum a2xx_tcf_perfcount_select {
+       VALID_CYCLES = 0,
+       SINGLE_PHASES = 1,
+       ANISO_PHASES = 2,
+       MIP_PHASES = 3,
+       VOL_PHASES = 4,
+       MIP_VOL_PHASES = 5,
+       MIP_ANISO_PHASES = 6,
+       VOL_ANISO_PHASES = 7,
+       ANISO_2_1_PHASES = 8,
+       ANISO_4_1_PHASES = 9,
+       ANISO_6_1_PHASES = 10,
+       ANISO_8_1_PHASES = 11,
+       ANISO_10_1_PHASES = 12,
+       ANISO_12_1_PHASES = 13,
+       ANISO_14_1_PHASES = 14,
+       ANISO_16_1_PHASES = 15,
+       MIP_VOL_ANISO_PHASES = 16,
+       ALIGN_2_PHASES = 17,
+       ALIGN_4_PHASES = 18,
+       TPC_BUSY = 19,
+       TPC_STALLED = 20,
+       TPC_STARVED = 21,
+       TPC_WORKING = 22,
+       TPC_WALKER_BUSY = 23,
+       TPC_WALKER_STALLED = 24,
+       TPC_WALKER_WORKING = 25,
+       TPC_ALIGNER_BUSY = 26,
+       TPC_ALIGNER_STALLED = 27,
+       TPC_ALIGNER_STALLED_BY_BLEND = 28,
+       TPC_ALIGNER_STALLED_BY_CACHE = 29,
+       TPC_ALIGNER_WORKING = 30,
+       TPC_BLEND_BUSY = 31,
+       TPC_BLEND_SYNC = 32,
+       TPC_BLEND_STARVED = 33,
+       TPC_BLEND_WORKING = 34,
+       OPCODE_0x00 = 35,
+       OPCODE_0x01 = 36,
+       OPCODE_0x04 = 37,
+       OPCODE_0x10 = 38,
+       OPCODE_0x11 = 39,
+       OPCODE_0x12 = 40,
+       OPCODE_0x13 = 41,
+       OPCODE_0x18 = 42,
+       OPCODE_0x19 = 43,
+       OPCODE_0x1A = 44,
+       OPCODE_OTHER = 45,
+       IN_FIFO_0_EMPTY = 56,
+       IN_FIFO_0_LT_HALF_FULL = 57,
+       IN_FIFO_0_HALF_FULL = 58,
+       IN_FIFO_0_FULL = 59,
+       IN_FIFO_TPC_EMPTY = 72,
+       IN_FIFO_TPC_LT_HALF_FULL = 73,
+       IN_FIFO_TPC_HALF_FULL = 74,
+       IN_FIFO_TPC_FULL = 75,
+       TPC_TC_XFC = 76,
+       TPC_TC_STATE = 77,
+       TC_STALL = 78,
+       QUAD0_TAPS = 79,
+       QUADS = 83,
+       TCA_SYNC_STALL = 84,
+       TAG_STALL = 85,
+       TCB_SYNC_STALL = 88,
+       TCA_VALID = 89,
+       PROBES_VALID = 90,
+       MISS_STALL = 91,
+       FETCH_FIFO_STALL = 92,
+       TCO_STALL = 93,
+       ANY_STALL = 94,
+       TAG_MISSES = 95,
+       TAG_HITS = 96,
+       SUB_TAG_MISSES = 97,
+       SET0_INVALIDATES = 98,
+       SET1_INVALIDATES = 99,
+       SET2_INVALIDATES = 100,
+       SET3_INVALIDATES = 101,
+       SET0_TAG_MISSES = 102,
+       SET1_TAG_MISSES = 103,
+       SET2_TAG_MISSES = 104,
+       SET3_TAG_MISSES = 105,
+       SET0_TAG_HITS = 106,
+       SET1_TAG_HITS = 107,
+       SET2_TAG_HITS = 108,
+       SET3_TAG_HITS = 109,
+       SET0_SUB_TAG_MISSES = 110,
+       SET1_SUB_TAG_MISSES = 111,
+       SET2_SUB_TAG_MISSES = 112,
+       SET3_SUB_TAG_MISSES = 113,
+       SET0_EVICT1 = 114,
+       SET0_EVICT2 = 115,
+       SET0_EVICT3 = 116,
+       SET0_EVICT4 = 117,
+       SET0_EVICT5 = 118,
+       SET0_EVICT6 = 119,
+       SET0_EVICT7 = 120,
+       SET0_EVICT8 = 121,
+       SET1_EVICT1 = 130,
+       SET1_EVICT2 = 131,
+       SET1_EVICT3 = 132,
+       SET1_EVICT4 = 133,
+       SET1_EVICT5 = 134,
+       SET1_EVICT6 = 135,
+       SET1_EVICT7 = 136,
+       SET1_EVICT8 = 137,
+       SET2_EVICT1 = 146,
+       SET2_EVICT2 = 147,
+       SET2_EVICT3 = 148,
+       SET2_EVICT4 = 149,
+       SET2_EVICT5 = 150,
+       SET2_EVICT6 = 151,
+       SET2_EVICT7 = 152,
+       SET2_EVICT8 = 153,
+       SET3_EVICT1 = 162,
+       SET3_EVICT2 = 163,
+       SET3_EVICT3 = 164,
+       SET3_EVICT4 = 165,
+       SET3_EVICT5 = 166,
+       SET3_EVICT6 = 167,
+       SET3_EVICT7 = 168,
+       SET3_EVICT8 = 169,
+       FF_EMPTY = 178,
+       FF_LT_HALF_FULL = 179,
+       FF_HALF_FULL = 180,
+       FF_FULL = 181,
+       FF_XFC = 182,
+       FF_STALLED = 183,
+       FG_MASKS = 184,
+       FG_LEFT_MASKS = 185,
+       FG_LEFT_MASK_STALLED = 186,
+       FG_LEFT_NOT_DONE_STALL = 187,
+       FG_LEFT_FG_STALL = 188,
+       FG_LEFT_SECTORS = 189,
+       FG0_REQUESTS = 195,
+       FG0_STALLED = 196,
+       MEM_REQ512 = 199,
+       MEM_REQ_SENT = 200,
+       MEM_LOCAL_READ_REQ = 202,
+       TC0_MH_STALLED = 203,
+};
+
+enum a2xx_sq_perfcnt_select {
+       SQ_PIXEL_VECTORS_SUB = 0,
+       SQ_VERTEX_VECTORS_SUB = 1,
+       SQ_ALU0_ACTIVE_VTX_SIMD0 = 2,
+       SQ_ALU1_ACTIVE_VTX_SIMD0 = 3,
+       SQ_ALU0_ACTIVE_PIX_SIMD0 = 4,
+       SQ_ALU1_ACTIVE_PIX_SIMD0 = 5,
+       SQ_ALU0_ACTIVE_VTX_SIMD1 = 6,
+       SQ_ALU1_ACTIVE_VTX_SIMD1 = 7,
+       SQ_ALU0_ACTIVE_PIX_SIMD1 = 8,
+       SQ_ALU1_ACTIVE_PIX_SIMD1 = 9,
+       SQ_EXPORT_CYCLES = 10,
+       SQ_ALU_CST_WRITTEN = 11,
+       SQ_TEX_CST_WRITTEN = 12,
+       SQ_ALU_CST_STALL = 13,
+       SQ_ALU_TEX_STALL = 14,
+       SQ_INST_WRITTEN = 15,
+       SQ_BOOLEAN_WRITTEN = 16,
+       SQ_LOOPS_WRITTEN = 17,
+       SQ_PIXEL_SWAP_IN = 18,
+       SQ_PIXEL_SWAP_OUT = 19,
+       SQ_VERTEX_SWAP_IN = 20,
+       SQ_VERTEX_SWAP_OUT = 21,
+       SQ_ALU_VTX_INST_ISSUED = 22,
+       SQ_TEX_VTX_INST_ISSUED = 23,
+       SQ_VC_VTX_INST_ISSUED = 24,
+       SQ_CF_VTX_INST_ISSUED = 25,
+       SQ_ALU_PIX_INST_ISSUED = 26,
+       SQ_TEX_PIX_INST_ISSUED = 27,
+       SQ_VC_PIX_INST_ISSUED = 28,
+       SQ_CF_PIX_INST_ISSUED = 29,
+       SQ_ALU0_FIFO_EMPTY_SIMD0 = 30,
+       SQ_ALU1_FIFO_EMPTY_SIMD0 = 31,
+       SQ_ALU0_FIFO_EMPTY_SIMD1 = 32,
+       SQ_ALU1_FIFO_EMPTY_SIMD1 = 33,
+       SQ_ALU_NOPS = 34,
+       SQ_PRED_SKIP = 35,
+       SQ_SYNC_ALU_STALL_SIMD0_VTX = 36,
+       SQ_SYNC_ALU_STALL_SIMD1_VTX = 37,
+       SQ_SYNC_TEX_STALL_VTX = 38,
+       SQ_SYNC_VC_STALL_VTX = 39,
+       SQ_CONSTANTS_USED_SIMD0 = 40,
+       SQ_CONSTANTS_SENT_SP_SIMD0 = 41,
+       SQ_GPR_STALL_VTX = 42,
+       SQ_GPR_STALL_PIX = 43,
+       SQ_VTX_RS_STALL = 44,
+       SQ_PIX_RS_STALL = 45,
+       SQ_SX_PC_FULL = 46,
+       SQ_SX_EXP_BUFF_FULL = 47,
+       SQ_SX_POS_BUFF_FULL = 48,
+       SQ_INTERP_QUADS = 49,
+       SQ_INTERP_ACTIVE = 50,
+       SQ_IN_PIXEL_STALL = 51,
+       SQ_IN_VTX_STALL = 52,
+       SQ_VTX_CNT = 53,
+       SQ_VTX_VECTOR2 = 54,
+       SQ_VTX_VECTOR3 = 55,
+       SQ_VTX_VECTOR4 = 56,
+       SQ_PIXEL_VECTOR1 = 57,
+       SQ_PIXEL_VECTOR23 = 58,
+       SQ_PIXEL_VECTOR4 = 59,
+       SQ_CONSTANTS_USED_SIMD1 = 60,
+       SQ_CONSTANTS_SENT_SP_SIMD1 = 61,
+       SQ_SX_MEM_EXP_FULL = 62,
+       SQ_ALU0_ACTIVE_VTX_SIMD2 = 63,
+       SQ_ALU1_ACTIVE_VTX_SIMD2 = 64,
+       SQ_ALU0_ACTIVE_PIX_SIMD2 = 65,
+       SQ_ALU1_ACTIVE_PIX_SIMD2 = 66,
+       SQ_ALU0_ACTIVE_VTX_SIMD3 = 67,
+       SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68,
+       SQ_ALU0_ACTIVE_PIX_SIMD3 = 69,
+       SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70,
+       SQ_ALU0_FIFO_EMPTY_SIMD2 = 71,
+       SQ_ALU1_FIFO_EMPTY_SIMD2 = 72,
+       SQ_ALU0_FIFO_EMPTY_SIMD3 = 73,
+       SQ_ALU1_FIFO_EMPTY_SIMD3 = 74,
+       SQ_SYNC_ALU_STALL_SIMD2_VTX = 75,
+       SQ_PERFCOUNT_VTX_POP_THREAD = 76,
+       SQ_SYNC_ALU_STALL_SIMD0_PIX = 77,
+       SQ_SYNC_ALU_STALL_SIMD1_PIX = 78,
+       SQ_SYNC_ALU_STALL_SIMD2_PIX = 79,
+       SQ_PERFCOUNT_PIX_POP_THREAD = 80,
+       SQ_SYNC_TEX_STALL_PIX = 81,
+       SQ_SYNC_VC_STALL_PIX = 82,
+       SQ_CONSTANTS_USED_SIMD2 = 83,
+       SQ_CONSTANTS_SENT_SP_SIMD2 = 84,
+       SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85,
+       SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86,
+       SQ_ALU0_FIFO_FULL_SIMD0 = 87,
+       SQ_ALU1_FIFO_FULL_SIMD0 = 88,
+       SQ_ALU0_FIFO_FULL_SIMD1 = 89,
+       SQ_ALU1_FIFO_FULL_SIMD1 = 90,
+       SQ_ALU0_FIFO_FULL_SIMD2 = 91,
+       SQ_ALU1_FIFO_FULL_SIMD2 = 92,
+       SQ_ALU0_FIFO_FULL_SIMD3 = 93,
+       SQ_ALU1_FIFO_FULL_SIMD3 = 94,
+       VC_PERF_STATIC = 95,
+       VC_PERF_STALLED = 96,
+       VC_PERF_STARVED = 97,
+       VC_PERF_SEND = 98,
+       VC_PERF_ACTUAL_STARVED = 99,
+       PIXEL_THREAD_0_ACTIVE = 100,
+       VERTEX_THREAD_0_ACTIVE = 101,
+       PIXEL_THREAD_0_NUMBER = 102,
+       VERTEX_THREAD_0_NUMBER = 103,
+       VERTEX_EVENT_NUMBER = 104,
+       PIXEL_EVENT_NUMBER = 105,
+       PTRBUFF_EF_PUSH = 106,
+       PTRBUFF_EF_POP_EVENT = 107,
+       PTRBUFF_EF_POP_NEW_VTX = 108,
+       PTRBUFF_EF_POP_DEALLOC = 109,
+       PTRBUFF_EF_POP_PVECTOR = 110,
+       PTRBUFF_EF_POP_PVECTOR_X = 111,
+       PTRBUFF_EF_POP_PVECTOR_VNZ = 112,
+       PTRBUFF_PB_DEALLOC = 113,
+       PTRBUFF_PI_STATE_PPB_POP = 114,
+       PTRBUFF_PI_RTR = 115,
+       PTRBUFF_PI_READ_EN = 116,
+       PTRBUFF_PI_BUFF_SWAP = 117,
+       PTRBUFF_SQ_FREE_BUFF = 118,
+       PTRBUFF_SQ_DEC = 119,
+       PTRBUFF_SC_VALID_CNTL_EVENT = 120,
+       PTRBUFF_SC_VALID_IJ_XFER = 121,
+       PTRBUFF_SC_NEW_VECTOR_1_Q = 122,
+       PTRBUFF_QUAL_NEW_VECTOR = 123,
+       PTRBUFF_QUAL_EVENT = 124,
+       PTRBUFF_END_BUFFER = 125,
+       PTRBUFF_FILL_QUAD = 126,
+       VERTS_WRITTEN_SPI = 127,
+       TP_FETCH_INSTR_EXEC = 128,
+       TP_FETCH_INSTR_REQ = 129,
+       TP_DATA_RETURN = 130,
+       SPI_WRITE_CYCLES_SP = 131,
+       SPI_WRITES_SP = 132,
+       SP_ALU_INSTR_EXEC = 133,
+       SP_CONST_ADDR_TO_SQ = 134,
+       SP_PRED_KILLS_TO_SQ = 135,
+       SP_EXPORT_CYCLES_TO_SX = 136,
+       SP_EXPORTS_TO_SX = 137,
+       SQ_CYCLES_ELAPSED = 138,
+       SQ_TCFS_OPT_ALLOC_EXEC = 139,
+       SQ_TCFS_NO_OPT_ALLOC = 140,
+       SQ_ALU0_NO_OPT_ALLOC = 141,
+       SQ_ALU1_NO_OPT_ALLOC = 142,
+       SQ_TCFS_ARB_XFC_CNT = 143,
+       SQ_ALU0_ARB_XFC_CNT = 144,
+       SQ_ALU1_ARB_XFC_CNT = 145,
+       SQ_TCFS_CFS_UPDATE_CNT = 146,
+       SQ_ALU0_CFS_UPDATE_CNT = 147,
+       SQ_ALU1_CFS_UPDATE_CNT = 148,
+       SQ_VTX_PUSH_THREAD_CNT = 149,
+       SQ_VTX_POP_THREAD_CNT = 150,
+       SQ_PIX_PUSH_THREAD_CNT = 151,
+       SQ_PIX_POP_THREAD_CNT = 152,
+       SQ_PIX_TOTAL = 153,
+       SQ_PIX_KILLED = 154,
+};
+
+enum a2xx_sx_perfcnt_select {
+       SX_EXPORT_VECTORS = 0,
+       SX_DUMMY_QUADS = 1,
+       SX_ALPHA_FAIL = 2,
+       SX_RB_QUAD_BUSY = 3,
+       SX_RB_COLOR_BUSY = 4,
+       SX_RB_QUAD_STALL = 5,
+       SX_RB_COLOR_STALL = 6,
+};
+
+enum a2xx_rbbm_perfcount1_sel {
+       RBBM1_COUNT = 0,
+       RBBM1_NRT_BUSY = 1,
+       RBBM1_RB_BUSY = 2,
+       RBBM1_SQ_CNTX0_BUSY = 3,
+       RBBM1_SQ_CNTX17_BUSY = 4,
+       RBBM1_VGT_BUSY = 5,
+       RBBM1_VGT_NODMA_BUSY = 6,
+       RBBM1_PA_BUSY = 7,
+       RBBM1_SC_CNTX_BUSY = 8,
+       RBBM1_TPC_BUSY = 9,
+       RBBM1_TC_BUSY = 10,
+       RBBM1_SX_BUSY = 11,
+       RBBM1_CP_COHER_BUSY = 12,
+       RBBM1_CP_NRT_BUSY = 13,
+       RBBM1_GFX_IDLE_STALL = 14,
+       RBBM1_INTERRUPT = 15,
+};
+
+enum a2xx_cp_perfcount_sel {
+       ALWAYS_COUNT = 0,
+       TRANS_FIFO_FULL = 1,
+       TRANS_FIFO_AF = 2,
+       RCIU_PFPTRANS_WAIT = 3,
+       RCIU_NRTTRANS_WAIT = 6,
+       CSF_NRT_READ_WAIT = 8,
+       CSF_I1_FIFO_FULL = 9,
+       CSF_I2_FIFO_FULL = 10,
+       CSF_ST_FIFO_FULL = 11,
+       CSF_RING_ROQ_FULL = 13,
+       CSF_I1_ROQ_FULL = 14,
+       CSF_I2_ROQ_FULL = 15,
+       CSF_ST_ROQ_FULL = 16,
+       MIU_TAG_MEM_FULL = 18,
+       MIU_WRITECLEAN = 19,
+       MIU_NRT_WRITE_STALLED = 22,
+       MIU_NRT_READ_STALLED = 23,
+       ME_WRITE_CONFIRM_FIFO_FULL = 24,
+       ME_VS_DEALLOC_FIFO_FULL = 25,
+       ME_PS_DEALLOC_FIFO_FULL = 26,
+       ME_REGS_VS_EVENT_FIFO_FULL = 27,
+       ME_REGS_PS_EVENT_FIFO_FULL = 28,
+       ME_REGS_CF_EVENT_FIFO_FULL = 29,
+       ME_MICRO_RB_STARVED = 30,
+       ME_MICRO_I1_STARVED = 31,
+       ME_MICRO_I2_STARVED = 32,
+       ME_MICRO_ST_STARVED = 33,
+       RCIU_RBBM_DWORD_SENT = 40,
+       ME_BUSY_CLOCKS = 41,
+       ME_WAIT_CONTEXT_AVAIL = 42,
+       PFP_TYPE0_PACKET = 43,
+       PFP_TYPE3_PACKET = 44,
+       CSF_RB_WPTR_NEQ_RPTR = 45,
+       CSF_I1_SIZE_NEQ_ZERO = 46,
+       CSF_I2_SIZE_NEQ_ZERO = 47,
+       CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a2xx_rb_perfcnt_select {
+       RBPERF_CNTX_BUSY = 0,
+       RBPERF_CNTX_BUSY_MAX = 1,
+       RBPERF_SX_QUAD_STARVED = 2,
+       RBPERF_SX_QUAD_STARVED_MAX = 3,
+       RBPERF_GA_GC_CH0_SYS_REQ = 4,
+       RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5,
+       RBPERF_GA_GC_CH1_SYS_REQ = 6,
+       RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7,
+       RBPERF_MH_STARVED = 8,
+       RBPERF_MH_STARVED_MAX = 9,
+       RBPERF_AZ_BC_COLOR_BUSY = 10,
+       RBPERF_AZ_BC_COLOR_BUSY_MAX = 11,
+       RBPERF_AZ_BC_Z_BUSY = 12,
+       RBPERF_AZ_BC_Z_BUSY_MAX = 13,
+       RBPERF_RB_SC_TILE_RTR_N = 14,
+       RBPERF_RB_SC_TILE_RTR_N_MAX = 15,
+       RBPERF_RB_SC_SAMP_RTR_N = 16,
+       RBPERF_RB_SC_SAMP_RTR_N_MAX = 17,
+       RBPERF_RB_SX_QUAD_RTR_N = 18,
+       RBPERF_RB_SX_QUAD_RTR_N_MAX = 19,
+       RBPERF_RB_SX_COLOR_RTR_N = 20,
+       RBPERF_RB_SX_COLOR_RTR_N_MAX = 21,
+       RBPERF_RB_SC_SAMP_LZ_BUSY = 22,
+       RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23,
+       RBPERF_ZXP_STALL = 24,
+       RBPERF_ZXP_STALL_MAX = 25,
+       RBPERF_EVENT_PENDING = 26,
+       RBPERF_EVENT_PENDING_MAX = 27,
+       RBPERF_RB_MH_VALID = 28,
+       RBPERF_RB_MH_VALID_MAX = 29,
+       RBPERF_SX_RB_QUAD_SEND = 30,
+       RBPERF_SX_RB_COLOR_SEND = 31,
+       RBPERF_SC_RB_TILE_SEND = 32,
+       RBPERF_SC_RB_SAMPLE_SEND = 33,
+       RBPERF_SX_RB_MEM_EXPORT = 34,
+       RBPERF_SX_RB_QUAD_EVENT = 35,
+       RBPERF_SC_RB_TILE_EVENT_FILTERED = 36,
+       RBPERF_SC_RB_TILE_EVENT_ALL = 37,
+       RBPERF_RB_SC_EZ_SEND = 38,
+       RBPERF_RB_SX_INDEX_SEND = 39,
+       RBPERF_GMEM_INTFO_RD = 40,
+       RBPERF_GMEM_INTF1_RD = 41,
+       RBPERF_GMEM_INTFO_WR = 42,
+       RBPERF_GMEM_INTF1_WR = 43,
+       RBPERF_RB_CP_CONTEXT_DONE = 44,
+       RBPERF_RB_CP_CACHE_FLUSH = 45,
+       RBPERF_ZPASS_DONE = 46,
+       RBPERF_ZCMD_VALID = 47,
+       RBPERF_CCMD_VALID = 48,
+       RBPERF_ACCUM_GRANT = 49,
+       RBPERF_ACCUM_C0_GRANT = 50,
+       RBPERF_ACCUM_C1_GRANT = 51,
+       RBPERF_ACCUM_FULL_BE_WR = 52,
+       RBPERF_ACCUM_REQUEST_NO_GRANT = 53,
+       RBPERF_ACCUM_TIMEOUT_PULSE = 54,
+       RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55,
+       RBPERF_ACCUM_CAM_HIT_FLUSHING = 56,
+};
+
+enum a2xx_mh_perfcnt_select {
+       CP_R0_REQUESTS = 0,
+       CP_R1_REQUESTS = 1,
+       CP_R2_REQUESTS = 2,
+       CP_R3_REQUESTS = 3,
+       CP_R4_REQUESTS = 4,
+       CP_TOTAL_READ_REQUESTS = 5,
+       CP_TOTAL_WRITE_REQUESTS = 6,
+       CP_TOTAL_REQUESTS = 7,
+       CP_DATA_BYTES_WRITTEN = 8,
+       CP_WRITE_CLEAN_RESPONSES = 9,
+       CP_R0_READ_BURSTS_RECEIVED = 10,
+       CP_R1_READ_BURSTS_RECEIVED = 11,
+       CP_R2_READ_BURSTS_RECEIVED = 12,
+       CP_R3_READ_BURSTS_RECEIVED = 13,
+       CP_R4_READ_BURSTS_RECEIVED = 14,
+       CP_TOTAL_READ_BURSTS_RECEIVED = 15,
+       CP_R0_DATA_BEATS_READ = 16,
+       CP_R1_DATA_BEATS_READ = 17,
+       CP_R2_DATA_BEATS_READ = 18,
+       CP_R3_DATA_BEATS_READ = 19,
+       CP_R4_DATA_BEATS_READ = 20,
+       CP_TOTAL_DATA_BEATS_READ = 21,
+       VGT_R0_REQUESTS = 22,
+       VGT_R1_REQUESTS = 23,
+       VGT_TOTAL_REQUESTS = 24,
+       VGT_R0_READ_BURSTS_RECEIVED = 25,
+       VGT_R1_READ_BURSTS_RECEIVED = 26,
+       VGT_TOTAL_READ_BURSTS_RECEIVED = 27,
+       VGT_R0_DATA_BEATS_READ = 28,
+       VGT_R1_DATA_BEATS_READ = 29,
+       VGT_TOTAL_DATA_BEATS_READ = 30,
+       TC_TOTAL_REQUESTS = 31,
+       TC_ROQ_REQUESTS = 32,
+       TC_INFO_SENT = 33,
+       TC_READ_BURSTS_RECEIVED = 34,
+       TC_DATA_BEATS_READ = 35,
+       TCD_BURSTS_READ = 36,
+       RB_REQUESTS = 37,
+       RB_DATA_BYTES_WRITTEN = 38,
+       RB_WRITE_CLEAN_RESPONSES = 39,
+       AXI_READ_REQUESTS_ID_0 = 40,
+       AXI_READ_REQUESTS_ID_1 = 41,
+       AXI_READ_REQUESTS_ID_2 = 42,
+       AXI_READ_REQUESTS_ID_3 = 43,
+       AXI_READ_REQUESTS_ID_4 = 44,
+       AXI_READ_REQUESTS_ID_5 = 45,
+       AXI_READ_REQUESTS_ID_6 = 46,
+       AXI_READ_REQUESTS_ID_7 = 47,
+       AXI_TOTAL_READ_REQUESTS = 48,
+       AXI_WRITE_REQUESTS_ID_0 = 49,
+       AXI_WRITE_REQUESTS_ID_1 = 50,
+       AXI_WRITE_REQUESTS_ID_2 = 51,
+       AXI_WRITE_REQUESTS_ID_3 = 52,
+       AXI_WRITE_REQUESTS_ID_4 = 53,
+       AXI_WRITE_REQUESTS_ID_5 = 54,
+       AXI_WRITE_REQUESTS_ID_6 = 55,
+       AXI_WRITE_REQUESTS_ID_7 = 56,
+       AXI_TOTAL_WRITE_REQUESTS = 57,
+       AXI_TOTAL_REQUESTS_ID_0 = 58,
+       AXI_TOTAL_REQUESTS_ID_1 = 59,
+       AXI_TOTAL_REQUESTS_ID_2 = 60,
+       AXI_TOTAL_REQUESTS_ID_3 = 61,
+       AXI_TOTAL_REQUESTS_ID_4 = 62,
+       AXI_TOTAL_REQUESTS_ID_5 = 63,
+       AXI_TOTAL_REQUESTS_ID_6 = 64,
+       AXI_TOTAL_REQUESTS_ID_7 = 65,
+       AXI_TOTAL_REQUESTS = 66,
+       AXI_READ_CHANNEL_BURSTS_ID_0 = 67,
+       AXI_READ_CHANNEL_BURSTS_ID_1 = 68,
+       AXI_READ_CHANNEL_BURSTS_ID_2 = 69,
+       AXI_READ_CHANNEL_BURSTS_ID_3 = 70,
+       AXI_READ_CHANNEL_BURSTS_ID_4 = 71,
+       AXI_READ_CHANNEL_BURSTS_ID_5 = 72,
+       AXI_READ_CHANNEL_BURSTS_ID_6 = 73,
+       AXI_READ_CHANNEL_BURSTS_ID_7 = 74,
+       AXI_READ_CHANNEL_TOTAL_BURSTS = 75,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82,
+       AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83,
+       AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84,
+       AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85,
+       AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86,
+       AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87,
+       AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88,
+       AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89,
+       AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90,
+       AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91,
+       AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92,
+       AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100,
+       AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101,
+       AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109,
+       AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110,
+       AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111,
+       TOTAL_MMU_MISSES = 112,
+       MMU_READ_MISSES = 113,
+       MMU_WRITE_MISSES = 114,
+       TOTAL_MMU_HITS = 115,
+       MMU_READ_HITS = 116,
+       MMU_WRITE_HITS = 117,
+       SPLIT_MODE_TC_HITS = 118,
+       SPLIT_MODE_TC_MISSES = 119,
+       SPLIT_MODE_NON_TC_HITS = 120,
+       SPLIT_MODE_NON_TC_MISSES = 121,
+       STALL_AWAITING_TLB_MISS_FETCH = 122,
+       MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123,
+       MMU_TLB_MISS_DATA_BEATS_READ = 124,
+       CP_CYCLES_HELD_OFF = 125,
+       VGT_CYCLES_HELD_OFF = 126,
+       TC_CYCLES_HELD_OFF = 127,
+       TC_ROQ_CYCLES_HELD_OFF = 128,
+       TC_CYCLES_HELD_OFF_TCD_FULL = 129,
+       RB_CYCLES_HELD_OFF = 130,
+       TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131,
+       TLB_MISS_CYCLES_HELD_OFF = 132,
+       AXI_READ_REQUEST_HELD_OFF = 133,
+       AXI_WRITE_REQUEST_HELD_OFF = 134,
+       AXI_REQUEST_HELD_OFF = 135,
+       AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136,
+       AXI_WRITE_DATA_HELD_OFF = 137,
+       CP_SAME_PAGE_BANK_REQUESTS = 138,
+       VGT_SAME_PAGE_BANK_REQUESTS = 139,
+       TC_SAME_PAGE_BANK_REQUESTS = 140,
+       TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141,
+       RB_SAME_PAGE_BANK_REQUESTS = 142,
+       TOTAL_SAME_PAGE_BANK_REQUESTS = 143,
+       CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144,
+       VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145,
+       TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146,
+       RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147,
+       TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148,
+       TOTAL_MH_READ_REQUESTS = 149,
+       TOTAL_MH_WRITE_REQUESTS = 150,
+       TOTAL_MH_REQUESTS = 151,
+       MH_BUSY = 152,
+       CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153,
+       VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154,
+       TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155,
+       RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156,
+       TC_ROQ_N_VALID_ENTRIES = 157,
+       ARQ_N_ENTRIES = 158,
+       WDB_N_ENTRIES = 159,
+       MH_READ_LATENCY_OUTST_REQ_SUM = 160,
+       MC_READ_LATENCY_OUTST_REQ_SUM = 161,
+       MC_TOTAL_READ_REQUESTS = 162,
+       ELAPSED_CYCLES_MH_GATED_CLK = 163,
+       ELAPSED_CLK_CYCLES = 164,
+       CP_W_16B_REQUESTS = 165,
+       CP_W_32B_REQUESTS = 166,
+       TC_16B_REQUESTS = 167,
+       TC_32B_REQUESTS = 168,
+       PA_REQUESTS = 169,
+       PA_DATA_BYTES_WRITTEN = 170,
+       PA_WRITE_CLEAN_RESPONSES = 171,
+       PA_CYCLES_HELD_OFF = 172,
+       AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173,
+       AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174,
+       AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175,
+       AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176,
+       AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177,
+       AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178,
+       AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179,
+       AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180,
+       AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181,
+};
+
 enum adreno_mmu_clnt_beh {
        BEH_NEVR = 0,
        BEH_TRAN_RNG = 1,
@@ -268,9 +1118,9 @@ enum sq_tex_border_color {
 };
 
 enum sq_tex_sign {
-       SQ_TEX_SIGN_UNISIGNED = 0,
+       SQ_TEX_SIGN_UNSIGNED = 0,
        SQ_TEX_SIGN_SIGNED = 1,
-       SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+       SQ_TEX_SIGN_UNSIGNED_BIASED = 2,
        SQ_TEX_SIGN_GAMMA = 3,
 };
 
@@ -1842,6 +2692,10 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
 
 #define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE                 0x00002380
 
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET                        0x00002381
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE                  0x00002382
+
 #define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET                 0x00002383
 
 #define REG_A2XX_SQ_CONSTANT_0                                 0x00004000
@@ -1858,6 +2712,220 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
 
 #define REG_A2XX_COHER_STATUS_PM4                              0x00000a2b
 
+#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT                     0x00000c88
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT                     0x00000c89
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT                     0x00000c8a
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT                     0x00000c8b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW                                0x00000c8c
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_HI                         0x00000c8d
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW                                0x00000c8e
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_HI                         0x00000c8f
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW                                0x00000c90
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_HI                         0x00000c91
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW                                0x00000c92
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_HI                         0x00000c93
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT                     0x00000c98
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW                                0x00000c99
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_HI                         0x00000c9a
+
+#define REG_A2XX_VGT_PERFCOUNTER0_SELECT                       0x00000c48
+
+#define REG_A2XX_VGT_PERFCOUNTER1_SELECT                       0x00000c49
+
+#define REG_A2XX_VGT_PERFCOUNTER2_SELECT                       0x00000c4a
+
+#define REG_A2XX_VGT_PERFCOUNTER3_SELECT                       0x00000c4b
+
+#define REG_A2XX_VGT_PERFCOUNTER0_LOW                          0x00000c4c
+
+#define REG_A2XX_VGT_PERFCOUNTER1_LOW                          0x00000c4e
+
+#define REG_A2XX_VGT_PERFCOUNTER2_LOW                          0x00000c50
+
+#define REG_A2XX_VGT_PERFCOUNTER3_LOW                          0x00000c52
+
+#define REG_A2XX_VGT_PERFCOUNTER0_HI                           0x00000c4d
+
+#define REG_A2XX_VGT_PERFCOUNTER1_HI                           0x00000c4f
+
+#define REG_A2XX_VGT_PERFCOUNTER2_HI                           0x00000c51
+
+#define REG_A2XX_VGT_PERFCOUNTER3_HI                           0x00000c53
+
+#define REG_A2XX_TCR_PERFCOUNTER0_SELECT                       0x00000e05
+
+#define REG_A2XX_TCR_PERFCOUNTER1_SELECT                       0x00000e08
+
+#define REG_A2XX_TCR_PERFCOUNTER0_HI                           0x00000e06
+
+#define REG_A2XX_TCR_PERFCOUNTER1_HI                           0x00000e09
+
+#define REG_A2XX_TCR_PERFCOUNTER0_LOW                          0x00000e07
+
+#define REG_A2XX_TCR_PERFCOUNTER1_LOW                          0x00000e0a
+
+#define REG_A2XX_TP0_PERFCOUNTER0_SELECT                       0x00000e1f
+
+#define REG_A2XX_TP0_PERFCOUNTER0_HI                           0x00000e20
+
+#define REG_A2XX_TP0_PERFCOUNTER0_LOW                          0x00000e21
+
+#define REG_A2XX_TP0_PERFCOUNTER1_SELECT                       0x00000e22
+
+#define REG_A2XX_TP0_PERFCOUNTER1_HI                           0x00000e23
+
+#define REG_A2XX_TP0_PERFCOUNTER1_LOW                          0x00000e24
+
+#define REG_A2XX_TCM_PERFCOUNTER0_SELECT                       0x00000e54
+
+#define REG_A2XX_TCM_PERFCOUNTER1_SELECT                       0x00000e57
+
+#define REG_A2XX_TCM_PERFCOUNTER0_HI                           0x00000e55
+
+#define REG_A2XX_TCM_PERFCOUNTER1_HI                           0x00000e58
+
+#define REG_A2XX_TCM_PERFCOUNTER0_LOW                          0x00000e56
+
+#define REG_A2XX_TCM_PERFCOUNTER1_LOW                          0x00000e59
+
+#define REG_A2XX_TCF_PERFCOUNTER0_SELECT                       0x00000e5a
+
+#define REG_A2XX_TCF_PERFCOUNTER1_SELECT                       0x00000e5d
+
+#define REG_A2XX_TCF_PERFCOUNTER2_SELECT                       0x00000e60
+
+#define REG_A2XX_TCF_PERFCOUNTER3_SELECT                       0x00000e63
+
+#define REG_A2XX_TCF_PERFCOUNTER4_SELECT                       0x00000e66
+
+#define REG_A2XX_TCF_PERFCOUNTER5_SELECT                       0x00000e69
+
+#define REG_A2XX_TCF_PERFCOUNTER6_SELECT                       0x00000e6c
+
+#define REG_A2XX_TCF_PERFCOUNTER7_SELECT                       0x00000e6f
+
+#define REG_A2XX_TCF_PERFCOUNTER8_SELECT                       0x00000e72
+
+#define REG_A2XX_TCF_PERFCOUNTER9_SELECT                       0x00000e75
+
+#define REG_A2XX_TCF_PERFCOUNTER10_SELECT                      0x00000e78
+
+#define REG_A2XX_TCF_PERFCOUNTER11_SELECT                      0x00000e7b
+
+#define REG_A2XX_TCF_PERFCOUNTER0_HI                           0x00000e5b
+
+#define REG_A2XX_TCF_PERFCOUNTER1_HI                           0x00000e5e
+
+#define REG_A2XX_TCF_PERFCOUNTER2_HI                           0x00000e61
+
+#define REG_A2XX_TCF_PERFCOUNTER3_HI                           0x00000e64
+
+#define REG_A2XX_TCF_PERFCOUNTER4_HI                           0x00000e67
+
+#define REG_A2XX_TCF_PERFCOUNTER5_HI                           0x00000e6a
+
+#define REG_A2XX_TCF_PERFCOUNTER6_HI                           0x00000e6d
+
+#define REG_A2XX_TCF_PERFCOUNTER7_HI                           0x00000e70
+
+#define REG_A2XX_TCF_PERFCOUNTER8_HI                           0x00000e73
+
+#define REG_A2XX_TCF_PERFCOUNTER9_HI                           0x00000e76
+
+#define REG_A2XX_TCF_PERFCOUNTER10_HI                          0x00000e79
+
+#define REG_A2XX_TCF_PERFCOUNTER11_HI                          0x00000e7c
+
+#define REG_A2XX_TCF_PERFCOUNTER0_LOW                          0x00000e5c
+
+#define REG_A2XX_TCF_PERFCOUNTER1_LOW                          0x00000e5f
+
+#define REG_A2XX_TCF_PERFCOUNTER2_LOW                          0x00000e62
+
+#define REG_A2XX_TCF_PERFCOUNTER3_LOW                          0x00000e65
+
+#define REG_A2XX_TCF_PERFCOUNTER4_LOW                          0x00000e68
+
+#define REG_A2XX_TCF_PERFCOUNTER5_LOW                          0x00000e6b
+
+#define REG_A2XX_TCF_PERFCOUNTER6_LOW                          0x00000e6e
+
+#define REG_A2XX_TCF_PERFCOUNTER7_LOW                          0x00000e71
+
+#define REG_A2XX_TCF_PERFCOUNTER8_LOW                          0x00000e74
+
+#define REG_A2XX_TCF_PERFCOUNTER9_LOW                          0x00000e77
+
+#define REG_A2XX_TCF_PERFCOUNTER10_LOW                         0x00000e7a
+
+#define REG_A2XX_TCF_PERFCOUNTER11_LOW                         0x00000e7d
+
+#define REG_A2XX_SQ_PERFCOUNTER0_SELECT                                0x00000dc8
+
+#define REG_A2XX_SQ_PERFCOUNTER1_SELECT                                0x00000dc9
+
+#define REG_A2XX_SQ_PERFCOUNTER2_SELECT                                0x00000dca
+
+#define REG_A2XX_SQ_PERFCOUNTER3_SELECT                                0x00000dcb
+
+#define REG_A2XX_SQ_PERFCOUNTER0_LOW                           0x00000dcc
+
+#define REG_A2XX_SQ_PERFCOUNTER0_HI                            0x00000dcd
+
+#define REG_A2XX_SQ_PERFCOUNTER1_LOW                           0x00000dce
+
+#define REG_A2XX_SQ_PERFCOUNTER1_HI                            0x00000dcf
+
+#define REG_A2XX_SQ_PERFCOUNTER2_LOW                           0x00000dd0
+
+#define REG_A2XX_SQ_PERFCOUNTER2_HI                            0x00000dd1
+
+#define REG_A2XX_SQ_PERFCOUNTER3_LOW                           0x00000dd2
+
+#define REG_A2XX_SQ_PERFCOUNTER3_HI                            0x00000dd3
+
+#define REG_A2XX_SX_PERFCOUNTER0_SELECT                                0x00000dd4
+
+#define REG_A2XX_SX_PERFCOUNTER0_LOW                           0x00000dd8
+
+#define REG_A2XX_SX_PERFCOUNTER0_HI                            0x00000dd9
+
+#define REG_A2XX_MH_PERFCOUNTER0_SELECT                                0x00000a46
+
+#define REG_A2XX_MH_PERFCOUNTER1_SELECT                                0x00000a4a
+
+#define REG_A2XX_MH_PERFCOUNTER0_CONFIG                                0x00000a47
+
+#define REG_A2XX_MH_PERFCOUNTER1_CONFIG                                0x00000a4b
+
+#define REG_A2XX_MH_PERFCOUNTER0_LOW                           0x00000a48
+
+#define REG_A2XX_MH_PERFCOUNTER1_LOW                           0x00000a4c
+
+#define REG_A2XX_MH_PERFCOUNTER0_HI                            0x00000a49
+
+#define REG_A2XX_MH_PERFCOUNTER1_HI                            0x00000a4d
+
+#define REG_A2XX_RB_PERFCOUNTER0_SELECT                                0x00000f04
+
+#define REG_A2XX_RB_PERFCOUNTER0_LOW                           0x00000f08
+
+#define REG_A2XX_RB_PERFCOUNTER0_HI                            0x00000f09
+
 #define REG_A2XX_SQ_TEX_0                                      0x00000000
 #define A2XX_SQ_TEX_0_TYPE__MASK                               0x00000003
 #define A2XX_SQ_TEX_0_TYPE__SHIFT                              0
@@ -1913,7 +2981,7 @@ static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
 {
        return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
 }
-#define A2XX_SQ_TEX_0_TILED                                    0x00000002
+#define A2XX_SQ_TEX_0_TILED                                    0x80000000
 
 #define REG_A2XX_SQ_TEX_1                                      0x00000001
 #define A2XX_SQ_TEX_1_FORMAT__MASK                             0x0000003f
@@ -2001,7 +3069,7 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
 }
 #define A2XX_SQ_TEX_3_EXP_ADJUST__MASK                         0x0007e000
 #define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT                                13
-static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val)
 {
        return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
 }
index 60f6472..6021f8d 100644
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               SZ_16M + 0xfff * SZ_64K);
+               0xfff * SZ_64K);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
index 17059f2..16f9ef4 100644
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -48,7 +50,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 enum a3xx_tile_mode {
        LINEAR = 0,
+       TILE_4X4 = 1,
        TILE_32X32 = 2,
+       TILE_4X2 = 3,
 };
 
 enum a3xx_state_block_id {
@@ -123,6 +127,7 @@ enum a3xx_vtx_fmt {
        VFMT_2_10_10_10_UNORM = 61,
        VFMT_2_10_10_10_SINT = 62,
        VFMT_2_10_10_10_SNORM = 63,
+       VFMT_NONE = 255,
 };
 
 enum a3xx_tex_fmt {
@@ -206,15 +211,7 @@ enum a3xx_tex_fmt {
        TFMT_ETC2_RGBA8 = 116,
        TFMT_ETC2_RGB8A1 = 117,
        TFMT_ETC2_RGB8 = 118,
-};
-
-enum a3xx_tex_fetchsize {
-       TFETCH_DISABLE = 0,
-       TFETCH_1_BYTE = 1,
-       TFETCH_2_BYTE = 2,
-       TFETCH_4_BYTE = 3,
-       TFETCH_8_BYTE = 4,
-       TFETCH_16_BYTE = 5,
+       TFMT_NONE = 255,
 };
 
 enum a3xx_color_fmt {
@@ -228,8 +225,8 @@ enum a3xx_color_fmt {
        RB_R8G8B8A8_SINT = 11,
        RB_R8G8_UNORM = 12,
        RB_R8G8_SNORM = 13,
-       RB_R8_UINT = 14,
-       RB_R8_SINT = 15,
+       RB_R8G8_UINT = 14,
+       RB_R8G8_SINT = 15,
        RB_R10G10B10A2_UNORM = 16,
        RB_A2R10G10B10_UNORM = 17,
        RB_R10G10B10A2_UINT = 18,
@@ -261,6 +258,7 @@ enum a3xx_color_fmt {
        RB_R32_UINT = 56,
        RB_R32G32_UINT = 57,
        RB_R32G32B32A32_UINT = 59,
+       RB_NONE = 255,
 };
 
 enum a3xx_cp_perfcounter_select {
@@ -932,6 +930,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
 
 #define REG_A3XX_GRAS_CL_CLIP_CNTL                             0x00002040
 #define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER                 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER             0x00002000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID               0x00004000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID           0x00008000
 #define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE                    0x00010000
 #define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE               0x00020000
 #define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE             0x00080000
@@ -1170,10 +1171,12 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
 }
 #define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE              0x00001000
 #define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM                     0x00002000
-#define A3XX_RB_RENDER_CONTROL_XCOORD                          0x00004000
-#define A3XX_RB_RENDER_CONTROL_YCOORD                          0x00008000
-#define A3XX_RB_RENDER_CONTROL_ZCOORD                          0x00010000
-#define A3XX_RB_RENDER_CONTROL_WCOORD                          0x00020000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK                        0x0003c000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT               14
+static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
+{
+       return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
+}
 #define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE                  0x00080000
 #define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE         0x00100000
 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST                      0x00400000
@@ -1755,11 +1758,29 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
 }
 
 #define REG_A3XX_HLSQ_CONTROL_3_REG                            0x00002203
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK                    0x000000ff
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT                   0
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK       0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT      0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+       return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK    0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT   8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+       return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK     0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT    16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+       return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK  0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
 {
-       return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+       return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
 }
 
 #define REG_A3XX_HLSQ_VS_CONTROL_REG                           0x00002204
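
Note on the hunk above: the single REGID field in HLSQ_CONTROL_3 is split into four per-interpolation-mode barycentric register IDs (perspective/non-perspective x center/centroid). A sketch of packing the register, where the ij_* values are hypothetical register IDs chosen by the shader compiler:

	uint32_t ctrl3 = A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(ij_persp_center) |
			 A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(ij_nonpersp_center) |
			 A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(ij_persp_centroid) |
			 A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(ij_nonpersp_centroid);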
@@ -1944,8 +1965,6 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
 
 #define REG_A3XX_VFD_INDEX_OFFSET                              0x00002245
 
-#define REG_A3XX_VFD_INDEX_OFFSET                              0x00002245
-
 static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
 
 static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -3107,7 +3126,12 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 }
 
 #define REG_A3XX_TEX_CONST_0                                   0x00000000
-#define A3XX_TEX_CONST_0_TILED                                 0x00000001
+#define A3XX_TEX_CONST_0_TILE_MODE__MASK                       0x00000003
+#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT                      0
+static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
+{
+       return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
+}
 #define A3XX_TEX_CONST_0_SRGB                                  0x00000004
 #define A3XX_TEX_CONST_0_SWIZ_X__MASK                          0x00000070
 #define A3XX_TEX_CONST_0_SWIZ_X__SHIFT                         4
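
Note on the hunk above: the boolean TILED bit in TEX_CONST_0 becomes a 2-bit TILE_MODE field taking the extended a3xx_tile_mode enum, so the tiling layout is spelled out rather than implied. A minimal sketch:

	/* a linear texture would pass LINEAR here instead of TILE_32X32 */
	uint32_t texconst0 = A3XX_TEX_CONST_0_TILE_MODE(TILE_32X32);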
@@ -3172,11 +3196,11 @@ static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
 {
        return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
 }
-#define A3XX_TEX_CONST_1_FETCHSIZE__MASK                       0xf0000000
-#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT                      28
-static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+#define A3XX_TEX_CONST_1_PITCHALIGN__MASK                      0xf0000000
+#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT                     28
+static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
 {
-       return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+       return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
 }
 
 #define REG_A3XX_TEX_CONST_2                                   0x00000002
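
Note on the hunk above: the a3xx_tex_fetchsize enum is gone and the same 4-bit field in TEX_CONST_1 is now a raw pitch-alignment value. A sketch under the assumption that the caller computes the alignment itself (width and pitchalign below are hypothetical variables, not helpers from this patch):

	uint32_t texconst1 = A3XX_TEX_CONST_1_WIDTH(width) |
			     A3XX_TEX_CONST_1_PITCHALIGN(pitchalign);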
index 9b51e25..a7eaf2c 100644 (file)
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -91,6 +93,7 @@ enum a4xx_color_fmt {
        RB4_R32G32B32A32_FLOAT = 60,
        RB4_R32G32B32A32_UINT = 61,
        RB4_R32G32B32A32_SINT = 62,
+       RB4_NONE = 255,
 };
 
 enum a4xx_tile_mode {
@@ -161,6 +164,7 @@ enum a4xx_vtx_fmt {
        VFMT4_2_10_10_10_UNORM = 61,
        VFMT4_2_10_10_10_SINT = 62,
        VFMT4_2_10_10_10_SNORM = 63,
+       VFMT4_NONE = 255,
 };
 
 enum a4xx_tex_fmt {
@@ -248,14 +252,7 @@ enum a4xx_tex_fmt {
        TFMT4_ASTC_10x10 = 122,
        TFMT4_ASTC_12x10 = 123,
        TFMT4_ASTC_12x12 = 124,
-};
-
-enum a4xx_tex_fetchsize {
-       TFETCH4_1_BYTE = 0,
-       TFETCH4_2_BYTE = 1,
-       TFETCH4_4_BYTE = 2,
-       TFETCH4_8_BYTE = 3,
-       TFETCH4_16_BYTE = 4,
+       TFMT4_NONE = 255,
 };
 
 enum a4xx_depth_format {
@@ -949,10 +946,12 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
 }
 
 #define REG_A4XX_RB_RENDER_CONTROL2                            0x000020a3
-#define A4XX_RB_RENDER_CONTROL2_XCOORD                         0x00000001
-#define A4XX_RB_RENDER_CONTROL2_YCOORD                         0x00000002
-#define A4XX_RB_RENDER_CONTROL2_ZCOORD                         0x00000004
-#define A4XX_RB_RENDER_CONTROL2_WCOORD                         0x00000008
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK               0x0000000f
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT              0
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val)
+{
+       return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK;
+}
 #define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK                     0x00000010
 #define A4XX_RB_RENDER_CONTROL2_FACENESS                       0x00000020
 #define A4XX_RB_RENDER_CONTROL2_SAMPLEID                       0x00000040
@@ -963,7 +962,10 @@ static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
        return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
 }
 #define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR                    0x00000800
-#define A4XX_RB_RENDER_CONTROL2_VARYING                                0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL                 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID              0x00002000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE                        0x00004000
+#define A4XX_RB_RENDER_CONTROL2_SIZE                           0x00008000
 
 static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
 
@@ -1877,10 +1879,6 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
 
 #define REG_A4XX_RBBM_PERFCTR_TP_0_HI                          0x00000115
 
-#define REG_A4XX_RBBM_PERFCTR_TP_0_LO                          0x00000114
-
-#define REG_A4XX_RBBM_PERFCTR_TP_0_HI                          0x00000115
-
 #define REG_A4XX_RBBM_PERFCTR_TP_1_LO                          0x00000116
 
 #define REG_A4XX_RBBM_PERFCTR_TP_1_HI                          0x00000117
@@ -2061,8 +2059,6 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
 
 #define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1                  0x0000009a
 
-#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO                         0x00000168
-
 #define REG_A4XX_RBBM_PERFCTR_CTL                              0x00000170
 
 #define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0                                0x00000171
@@ -2210,8 +2206,18 @@ static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 {
        return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
 }
-#define A4XX_CP_PROTECT_REG_TRAP_WRITE                         0x20000000
-#define A4XX_CP_PROTECT_REG_TRAP_READ                          0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK                   0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT                  29
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+       return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK                    0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT                   30
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+       return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
 
 #define REG_A4XX_CP_PROTECT_CTRL                               0x00000250
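
Note on the hunk above: TRAP_WRITE and TRAP_READ change from bare bit defines to single-bit MASK/SHIFT fields; passing 1 through the new helpers yields the same register bits as the old defines (0x20000000 and 0x40000000). Sketch:

	uint32_t protect = A4XX_CP_PROTECT_REG_TRAP_WRITE(1) |
			   A4XX_CP_PROTECT_REG_TRAP_READ(1);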
 
@@ -3151,8 +3157,9 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
 #define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE               0x00020000
 #define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z                 0x00400000
 
-#define REG_A4XX_GRAS_CLEAR_CNTL                               0x00002003
-#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR                     0x00000001
+#define REG_A4XX_GRAS_CNTL                                     0x00002003
+#define A4XX_GRAS_CNTL_IJ_PERSP                                        0x00000001
+#define A4XX_GRAS_CNTL_IJ_LINEAR                               0x00000002
 
 #define REG_A4XX_GRAS_CL_GB_CLIP_ADJ                           0x00002004
 #define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK                    0x000003ff
@@ -3524,14 +3531,44 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
 }
 
 #define REG_A4XX_HLSQ_CONTROL_3_REG                            0x000023c3
-#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK                    0x000000ff
-#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT                   0
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK           0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT          0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK          0x0000ff00
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT         8
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK                0x00ff0000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT       16
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
 {
-       return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+       return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK       0xff000000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT      24
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
 }
 
 #define REG_A4XX_HLSQ_CONTROL_4_REG                            0x000023c4
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK          0x000000ff
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT         0
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK         0x0000ff00
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT                8
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+       return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
 
 #define REG_A4XX_HLSQ_VS_CONTROL_REG                           0x000023c5
 #define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK             0x000000ff
@@ -4115,11 +4152,11 @@ static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
 }
 
 #define REG_A4XX_TEX_CONST_2                                   0x00000002
-#define A4XX_TEX_CONST_2_FETCHSIZE__MASK                       0x0000000f
-#define A4XX_TEX_CONST_2_FETCHSIZE__SHIFT                      0
-static inline uint32_t A4XX_TEX_CONST_2_FETCHSIZE(enum a4xx_tex_fetchsize val)
+#define A4XX_TEX_CONST_2_PITCHALIGN__MASK                      0x0000000f
+#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT                     0
+static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
 {
-       return ((val) << A4XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A4XX_TEX_CONST_2_FETCHSIZE__MASK;
+       return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK;
 }
 #define A4XX_TEX_CONST_2_PITCH__MASK                           0x3ffffe00
 #define A4XX_TEX_CONST_2_PITCH__SHIFT                          9
index 4a61d4e..346cc6f 100644 (file)
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/ubuntu/envytools/envytools/rnndb/./adreno.xml             (    501 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml          (  79608 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml (  14239 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml    (  43155 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml          ( 147291 bytes, from 2019-05-29 14:51:41)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml          ( 148461 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2019-05-29 01:28:15)
-
-Copyright (C) 2013-2019 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -91,6 +93,7 @@ enum a5xx_color_fmt {
        RB5_R32G32B32A32_FLOAT = 130,
        RB5_R32G32B32A32_UINT = 131,
        RB5_R32G32B32A32_SINT = 132,
+       RB5_NONE = 255,
 };
 
 enum a5xx_tile_mode {
@@ -165,6 +168,7 @@ enum a5xx_vtx_fmt {
        VFMT5_32_32_32_32_UINT = 131,
        VFMT5_32_32_32_32_SINT = 132,
        VFMT5_32_32_32_32_FIXED = 133,
+       VFMT5_NONE = 255,
 };
 
 enum a5xx_tex_fmt {
@@ -250,14 +254,7 @@ enum a5xx_tex_fmt {
        TFMT5_ASTC_10x10 = 204,
        TFMT5_ASTC_12x10 = 205,
        TFMT5_ASTC_12x12 = 206,
-};
-
-enum a5xx_tex_fetchsize {
-       TFETCH5_1_BYTE = 0,
-       TFETCH5_2_BYTE = 1,
-       TFETCH5_4_BYTE = 2,
-       TFETCH5_8_BYTE = 3,
-       TFETCH5_16_BYTE = 4,
+       TFMT5_NONE = 255,
 };
 
 enum a5xx_depth_format {
@@ -1052,8 +1049,18 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 {
        return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
 }
-#define A5XX_CP_PROTECT_REG_TRAP_WRITE                         0x20000000
-#define A5XX_CP_PROTECT_REG_TRAP_READ                          0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK                   0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT                  29
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+       return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK                    0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT                   30
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+       return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
 
 #define REG_A5XX_CP_PROTECT_CNTL                               0x000008a0
 
@@ -1825,37 +1832,192 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 #define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI                      0x000004d3
 
 #define REG_A5XX_RBBM_STATUS                                   0x000004f5
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB                      0x80000000
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP                   0x40000000
-#define A5XX_RBBM_STATUS_HLSQ_BUSY                             0x20000000
-#define A5XX_RBBM_STATUS_VSC_BUSY                              0x10000000
-#define A5XX_RBBM_STATUS_TPL1_BUSY                             0x08000000
-#define A5XX_RBBM_STATUS_SP_BUSY                               0x04000000
-#define A5XX_RBBM_STATUS_UCHE_BUSY                             0x02000000
-#define A5XX_RBBM_STATUS_VPC_BUSY                              0x01000000
-#define A5XX_RBBM_STATUS_VFDP_BUSY                             0x00800000
-#define A5XX_RBBM_STATUS_VFD_BUSY                              0x00400000
-#define A5XX_RBBM_STATUS_TESS_BUSY                             0x00200000
-#define A5XX_RBBM_STATUS_PC_VSD_BUSY                           0x00100000
-#define A5XX_RBBM_STATUS_PC_DCALL_BUSY                         0x00080000
-#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY                       0x00040000
-#define A5XX_RBBM_STATUS_DCOM_BUSY                             0x00020000
-#define A5XX_RBBM_STATUS_COM_BUSY                              0x00010000
-#define A5XX_RBBM_STATUS_LRZ_BUZY                              0x00008000
-#define A5XX_RBBM_STATUS_A2D_DSP_BUSY                          0x00004000
-#define A5XX_RBBM_STATUS_CCUFCHE_BUSY                          0x00002000
-#define A5XX_RBBM_STATUS_RB_BUSY                               0x00001000
-#define A5XX_RBBM_STATUS_RAS_BUSY                              0x00000800
-#define A5XX_RBBM_STATUS_TSE_BUSY                              0x00000400
-#define A5XX_RBBM_STATUS_VBIF_BUSY                             0x00000200
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST                 0x00000100
-#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST                      0x00000080
-#define A5XX_RBBM_STATUS_CP_BUSY                               0x00000040
-#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY                      0x00000020
-#define A5XX_RBBM_STATUS_CP_CRASH_BUSY                         0x00000010
-#define A5XX_RBBM_STATUS_CP_ETS_BUSY                           0x00000008
-#define A5XX_RBBM_STATUS_CP_PFP_BUSY                           0x00000004
-#define A5XX_RBBM_STATUS_CP_ME_BUSY                            0x00000002
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK                        0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT               31
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK             0x40000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT            30
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
+}
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK                       0x20000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT                      29
+static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VSC_BUSY__MASK                                0x10000000
+#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT                       28
+static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK                       0x08000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT                      27
+static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_SP_BUSY__MASK                         0x04000000
+#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT                                26
+static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK                       0x02000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT                      25
+static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VPC_BUSY__MASK                                0x01000000
+#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT                       24
+static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK                       0x00800000
+#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT                      23
+static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFD_BUSY__MASK                                0x00400000
+#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT                       22
+static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TESS_BUSY__MASK                       0x00200000
+#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT                      21
+static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK                     0x00100000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT                    20
+static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK                   0x00080000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT                  19
+static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK                 0x00040000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT                        18
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK                       0x00020000
+#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT                      17
+static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_COM_BUSY__MASK                                0x00010000
+#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT                       16
+static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK                                0x00008000
+#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT                       15
+static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
+}
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK                    0x00004000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT                   14
+static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK                    0x00002000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT                   13
+static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RB_BUSY__MASK                         0x00001000
+#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT                                12
+static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RAS_BUSY__MASK                                0x00000800
+#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT                       11
+static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TSE_BUSY__MASK                                0x00000400
+#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT                       10
+static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK                       0x00000200
+#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT                      9
+static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK           0x00000100
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT          8
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK                        0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT               7
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY__MASK                         0x00000040
+#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT                                6
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK                        0x00000020
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT               5
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK                   0x00000010
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT                  4
+static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK                     0x00000008
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT                    3
+static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK                     0x00000004
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT                    2
+static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK                      0x00000002
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT                     1
+static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
+}
 #define A5XX_RBBM_STATUS_HI_BUSY                               0x00000001
 
 #define REG_A5XX_RBBM_STATUS3                                  0x00000530
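
Note on the hunk above: every busy bit in RBBM_STATUS gains a MASK/SHIFT pair plus a packing helper; for a read-only status register the practical effect is that single-bit tests now go through the *__MASK defines. A minimal sketch (a hypothetical helper, not part of the patch):

	static bool a5xx_cp_busy(uint32_t status)
	{
		/* same test as before, only the define name changes */
		return status & A5XX_RBBM_STATUS_CP_BUSY__MASK;
	}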
@@ -1884,14 +2046,6 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI                    0x0000046a
 
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0                       0x0000046b
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1                       0x0000046c
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2                       0x0000046d
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3                       0x0000046e
-
 #define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED                  0x0000046f
 
 #define REG_A5XX_RBBM_AHB_ERROR                                        0x000004ed
@@ -2455,8 +2609,6 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
 
 #define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL                     0x0000a894
 
-#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL                      0x0000a8a3
-
 #define REG_A5XX_GPMU_WFI_CONFIG                               0x0000a8c1
 
 #define REG_A5XX_GPMU_RBBM_INTR_INFO                           0x0000a8d6
@@ -2659,12 +2811,16 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
 #define REG_A5XX_UNKNOWN_E004                                  0x0000e004
 
 #define REG_A5XX_GRAS_CNTL                                     0x0000e005
-#define A5XX_GRAS_CNTL_VARYING                                 0x00000001
-#define A5XX_GRAS_CNTL_UNK3                                    0x00000008
-#define A5XX_GRAS_CNTL_XCOORD                                  0x00000040
-#define A5XX_GRAS_CNTL_YCOORD                                  0x00000080
-#define A5XX_GRAS_CNTL_ZCOORD                                  0x00000100
-#define A5XX_GRAS_CNTL_WCOORD                                  0x00000200
+#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL                          0x00000001
+#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID                       0x00000002
+#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE                         0x00000004
+#define A5XX_GRAS_CNTL_SIZE                                    0x00000008
+#define A5XX_GRAS_CNTL_COORD_MASK__MASK                                0x000003c0
+#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT                       6
+static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
+}
 
 #define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ                    0x0000e006
 #define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK             0x000003ff
@@ -2991,12 +3147,16 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
 #define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE                    0x00000004
 
 #define REG_A5XX_RB_RENDER_CONTROL0                            0x0000e144
-#define A5XX_RB_RENDER_CONTROL0_VARYING                                0x00000001
-#define A5XX_RB_RENDER_CONTROL0_UNK3                           0x00000008
-#define A5XX_RB_RENDER_CONTROL0_XCOORD                         0x00000040
-#define A5XX_RB_RENDER_CONTROL0_YCOORD                         0x00000080
-#define A5XX_RB_RENDER_CONTROL0_ZCOORD                         0x00000100
-#define A5XX_RB_RENDER_CONTROL0_WCOORD                         0x00000200
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL                 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID              0x00000002
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE                        0x00000004
+#define A5XX_RB_RENDER_CONTROL0_SIZE                           0x00000008
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK               0x000003c0
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT              6
+static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+       return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
 
 #define REG_A5XX_RB_RENDER_CONTROL1                            0x0000e145
 #define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK                     0x00000001
@@ -4450,16 +4610,52 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
 {
        return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
 }
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK                     0xff000000
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT                    24
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
 
 #define REG_A5XX_HLSQ_CONTROL_3_REG                            0x0000e787
-#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK         0x000000ff
-#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT                0
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK           0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT          0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK          0x0000ff00
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT         8
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK                0x00ff0000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT       16
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
 {
-       return ((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+       return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK       0xff000000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT      24
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
 }
 
 #define REG_A5XX_HLSQ_CONTROL_4_REG                            0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK          0x000000ff
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT         0
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK         0x0000ff00
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT                8
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
 #define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK             0x00ff0000
 #define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT            16
 static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
@@ -4855,10 +5051,26 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
 
 #define REG_A5XX_RB_2D_SRC_FLAGS_HI                            0x00002141
 
+#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH                         0x00002142
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK                       0xffffffff
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT                      0
+static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+}
+
 #define REG_A5XX_RB_2D_DST_FLAGS_LO                            0x00002143
 
 #define REG_A5XX_RB_2D_DST_FLAGS_HI                            0x00002144
 
+#define REG_A5XX_RB_2D_DST_FLAGS_PITCH                         0x00002145
+#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK                       0xffffffff
+#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT                      0
+static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
 #define REG_A5XX_GRAS_2D_BLIT_CNTL                             0x00002180
 
 #define REG_A5XX_GRAS_2D_SRC_INFO                              0x00002181
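
Note on the hunk above: the new RB_2D_SRC/DST_FLAGS_PITCH helpers shift the value right by 6 before packing, which suggests the field is expressed in 64-byte units while the caller passes a byte pitch; that reading is inferred from the macro, not stated in the patch. Sketch (flag_pitch_bytes is a hypothetical caller variable):

	uint32_t src_flags_pitch = A5XX_RB_2D_SRC_FLAGS_PITCH(flag_pitch_bytes);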
@@ -5059,11 +5271,11 @@ static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
 }
 
 #define REG_A5XX_TEX_CONST_2                                   0x00000002
-#define A5XX_TEX_CONST_2_FETCHSIZE__MASK                       0x0000000f
-#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT                      0
-static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+#define A5XX_TEX_CONST_2_PITCHALIGN__MASK                      0x0000000f
+#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT                     0
+static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
 {
-       return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+       return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
 }
 #define A5XX_TEX_CONST_2_PITCH__MASK                           0x1fffff80
 #define A5XX_TEX_CONST_2_PITCH__SHIFT                          7
@@ -5085,6 +5297,13 @@ static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
 {
        return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
 }
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK                     0x07800000
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT                    23
+static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+       return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_TILE_ALL                              0x08000000
 #define A5XX_TEX_CONST_3_FLAG                                  0x10000000
 
 #define REG_A5XX_TEX_CONST_4                                   0x00000004
@@ -5197,5 +5416,21 @@ static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
        return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
 }
 
+#define REG_A5XX_UBO_0                                         0x00000000
+#define A5XX_UBO_0_BASE_LO__MASK                               0xffffffff
+#define A5XX_UBO_0_BASE_LO__SHIFT                              0
+static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
+{
+       return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_UBO_1                                         0x00000001
+#define A5XX_UBO_1_BASE_HI__MASK                               0x0001ffff
+#define A5XX_UBO_1_BASE_HI__SHIFT                              0
+static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
+{
+       return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
+}
+
 
 #endif /* A5XX_XML */
index 0e1933e..9e63a19 100644 (file)
@@ -186,7 +186,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
         * timestamp is written to the memory and then triggers the interrupt
         */
        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
-       OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+       OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+               CP_EVENT_WRITE_0_IRQ);
        OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);
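
Note on the hunk above: the raw (1 << 31) is replaced by the generated CP_EVENT_WRITE_0_* helpers; assuming CP_EVENT_WRITE_0_IRQ is bit 31, as the literal it replaces suggests, the emitted dword is unchanged and only gains a name:

	/* both expressions should encode the same dword under that assumption */
	uint32_t old_val = CACHE_FLUSH_TS | (1U << 31);
	uint32_t new_val = CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) | CP_EVENT_WRITE_0_IRQ;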
@@ -730,7 +731,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
         */
        if (adreno_is_a530(adreno_gpu)) {
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
-               OUT_RING(gpu->rb[0], 0x0F);
+               OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
 
                gpu->funcs->flush(gpu, gpu->rb[0]);
                if (!a5xx_idle(gpu, gpu->rb[0]))
index 47840b7..920c5e6 100644 (file)
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml     ( 161969 bytes, from 2019-11-29 07:18:16)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2019 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -46,219 +48,134 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 
 
-enum a6xx_color_fmt {
-       RB6_A8_UNORM = 2,
-       RB6_R8_UNORM = 3,
-       RB6_R8_SNORM = 4,
-       RB6_R8_UINT = 5,
-       RB6_R8_SINT = 6,
-       RB6_R4G4B4A4_UNORM = 8,
-       RB6_R5G5B5A1_UNORM = 10,
-       RB6_R5G6B5_UNORM = 14,
-       RB6_R8G8_UNORM = 15,
-       RB6_R8G8_SNORM = 16,
-       RB6_R8G8_UINT = 17,
-       RB6_R8G8_SINT = 18,
-       RB6_R16_UNORM = 21,
-       RB6_R16_SNORM = 22,
-       RB6_R16_FLOAT = 23,
-       RB6_R16_UINT = 24,
-       RB6_R16_SINT = 25,
-       RB6_R8G8B8A8_UNORM = 48,
-       RB6_R8G8B8_UNORM = 49,
-       RB6_R8G8B8A8_SNORM = 50,
-       RB6_R8G8B8A8_UINT = 51,
-       RB6_R8G8B8A8_SINT = 52,
-       RB6_R10G10B10A2_UNORM = 55,
-       RB6_R10G10B10A2_UINT = 58,
-       RB6_R11G11B10_FLOAT = 66,
-       RB6_R16G16_UNORM = 67,
-       RB6_R16G16_SNORM = 68,
-       RB6_R16G16_FLOAT = 69,
-       RB6_R16G16_UINT = 70,
-       RB6_R16G16_SINT = 71,
-       RB6_R32_FLOAT = 74,
-       RB6_R32_UINT = 75,
-       RB6_R32_SINT = 76,
-       RB6_R16G16B16A16_UNORM = 96,
-       RB6_R16G16B16A16_SNORM = 97,
-       RB6_R16G16B16A16_FLOAT = 98,
-       RB6_R16G16B16A16_UINT = 99,
-       RB6_R16G16B16A16_SINT = 100,
-       RB6_R32G32_FLOAT = 103,
-       RB6_R32G32_UINT = 104,
-       RB6_R32G32_SINT = 105,
-       RB6_R32G32B32A32_FLOAT = 130,
-       RB6_R32G32B32A32_UINT = 131,
-       RB6_R32G32B32A32_SINT = 132,
-       RB6_X8Z24_UNORM = 160,
-};
-
 enum a6xx_tile_mode {
        TILE6_LINEAR = 0,
        TILE6_2 = 2,
        TILE6_3 = 3,
 };
 
-enum a6xx_vtx_fmt {
-       VFMT6_8_UNORM = 3,
-       VFMT6_8_SNORM = 4,
-       VFMT6_8_UINT = 5,
-       VFMT6_8_SINT = 6,
-       VFMT6_8_8_UNORM = 15,
-       VFMT6_8_8_SNORM = 16,
-       VFMT6_8_8_UINT = 17,
-       VFMT6_8_8_SINT = 18,
-       VFMT6_16_UNORM = 21,
-       VFMT6_16_SNORM = 22,
-       VFMT6_16_FLOAT = 23,
-       VFMT6_16_UINT = 24,
-       VFMT6_16_SINT = 25,
-       VFMT6_8_8_8_UNORM = 33,
-       VFMT6_8_8_8_SNORM = 34,
-       VFMT6_8_8_8_UINT = 35,
-       VFMT6_8_8_8_SINT = 36,
-       VFMT6_8_8_8_8_UNORM = 48,
-       VFMT6_8_8_8_8_SNORM = 50,
-       VFMT6_8_8_8_8_UINT = 51,
-       VFMT6_8_8_8_8_SINT = 52,
-       VFMT6_10_10_10_2_UNORM = 54,
-       VFMT6_10_10_10_2_SNORM = 57,
-       VFMT6_10_10_10_2_UINT = 58,
-       VFMT6_10_10_10_2_SINT = 59,
-       VFMT6_11_11_10_FLOAT = 66,
-       VFMT6_16_16_UNORM = 67,
-       VFMT6_16_16_SNORM = 68,
-       VFMT6_16_16_FLOAT = 69,
-       VFMT6_16_16_UINT = 70,
-       VFMT6_16_16_SINT = 71,
-       VFMT6_32_UNORM = 72,
-       VFMT6_32_SNORM = 73,
-       VFMT6_32_FLOAT = 74,
-       VFMT6_32_UINT = 75,
-       VFMT6_32_SINT = 76,
-       VFMT6_32_FIXED = 77,
-       VFMT6_16_16_16_UNORM = 88,
-       VFMT6_16_16_16_SNORM = 89,
-       VFMT6_16_16_16_FLOAT = 90,
-       VFMT6_16_16_16_UINT = 91,
-       VFMT6_16_16_16_SINT = 92,
-       VFMT6_16_16_16_16_UNORM = 96,
-       VFMT6_16_16_16_16_SNORM = 97,
-       VFMT6_16_16_16_16_FLOAT = 98,
-       VFMT6_16_16_16_16_UINT = 99,
-       VFMT6_16_16_16_16_SINT = 100,
-       VFMT6_32_32_UNORM = 101,
-       VFMT6_32_32_SNORM = 102,
-       VFMT6_32_32_FLOAT = 103,
-       VFMT6_32_32_UINT = 104,
-       VFMT6_32_32_SINT = 105,
-       VFMT6_32_32_FIXED = 106,
-       VFMT6_32_32_32_UNORM = 112,
-       VFMT6_32_32_32_SNORM = 113,
-       VFMT6_32_32_32_UINT = 114,
-       VFMT6_32_32_32_SINT = 115,
-       VFMT6_32_32_32_FLOAT = 116,
-       VFMT6_32_32_32_FIXED = 117,
-       VFMT6_32_32_32_32_UNORM = 128,
-       VFMT6_32_32_32_32_SNORM = 129,
-       VFMT6_32_32_32_32_FLOAT = 130,
-       VFMT6_32_32_32_32_UINT = 131,
-       VFMT6_32_32_32_32_SINT = 132,
-       VFMT6_32_32_32_32_FIXED = 133,
-};
-
-enum a6xx_tex_fmt {
-       TFMT6_A8_UNORM = 2,
-       TFMT6_8_UNORM = 3,
-       TFMT6_8_SNORM = 4,
-       TFMT6_8_UINT = 5,
-       TFMT6_8_SINT = 6,
-       TFMT6_4_4_4_4_UNORM = 8,
-       TFMT6_5_5_5_1_UNORM = 10,
-       TFMT6_5_6_5_UNORM = 14,
-       TFMT6_8_8_UNORM = 15,
-       TFMT6_8_8_SNORM = 16,
-       TFMT6_8_8_UINT = 17,
-       TFMT6_8_8_SINT = 18,
-       TFMT6_L8_A8_UNORM = 19,
-       TFMT6_16_UNORM = 21,
-       TFMT6_16_SNORM = 22,
-       TFMT6_16_FLOAT = 23,
-       TFMT6_16_UINT = 24,
-       TFMT6_16_SINT = 25,
-       TFMT6_8_8_8_8_UNORM = 48,
-       TFMT6_8_8_8_UNORM = 49,
-       TFMT6_8_8_8_8_SNORM = 50,
-       TFMT6_8_8_8_8_UINT = 51,
-       TFMT6_8_8_8_8_SINT = 52,
-       TFMT6_9_9_9_E5_FLOAT = 53,
-       TFMT6_10_10_10_2_UNORM = 54,
-       TFMT6_10_10_10_2_UINT = 58,
-       TFMT6_11_11_10_FLOAT = 66,
-       TFMT6_16_16_UNORM = 67,
-       TFMT6_16_16_SNORM = 68,
-       TFMT6_16_16_FLOAT = 69,
-       TFMT6_16_16_UINT = 70,
-       TFMT6_16_16_SINT = 71,
-       TFMT6_32_FLOAT = 74,
-       TFMT6_32_UINT = 75,
-       TFMT6_32_SINT = 76,
-       TFMT6_16_16_16_16_UNORM = 96,
-       TFMT6_16_16_16_16_SNORM = 97,
-       TFMT6_16_16_16_16_FLOAT = 98,
-       TFMT6_16_16_16_16_UINT = 99,
-       TFMT6_16_16_16_16_SINT = 100,
-       TFMT6_32_32_FLOAT = 103,
-       TFMT6_32_32_UINT = 104,
-       TFMT6_32_32_SINT = 105,
-       TFMT6_32_32_32_UINT = 114,
-       TFMT6_32_32_32_SINT = 115,
-       TFMT6_32_32_32_FLOAT = 116,
-       TFMT6_32_32_32_32_FLOAT = 130,
-       TFMT6_32_32_32_32_UINT = 131,
-       TFMT6_32_32_32_32_SINT = 132,
-       TFMT6_X8Z24_UNORM = 160,
-       TFMT6_ETC2_RG11_UNORM = 171,
-       TFMT6_ETC2_RG11_SNORM = 172,
-       TFMT6_ETC2_R11_UNORM = 173,
-       TFMT6_ETC2_R11_SNORM = 174,
-       TFMT6_ETC1 = 175,
-       TFMT6_ETC2_RGB8 = 176,
-       TFMT6_ETC2_RGBA8 = 177,
-       TFMT6_ETC2_RGB8A1 = 178,
-       TFMT6_DXT1 = 179,
-       TFMT6_DXT3 = 180,
-       TFMT6_DXT5 = 181,
-       TFMT6_RGTC1_UNORM = 183,
-       TFMT6_RGTC1_SNORM = 184,
-       TFMT6_RGTC2_UNORM = 187,
-       TFMT6_RGTC2_SNORM = 188,
-       TFMT6_BPTC_UFLOAT = 190,
-       TFMT6_BPTC_FLOAT = 191,
-       TFMT6_BPTC = 192,
-       TFMT6_ASTC_4x4 = 193,
-       TFMT6_ASTC_5x4 = 194,
-       TFMT6_ASTC_5x5 = 195,
-       TFMT6_ASTC_6x5 = 196,
-       TFMT6_ASTC_6x6 = 197,
-       TFMT6_ASTC_8x5 = 198,
-       TFMT6_ASTC_8x6 = 199,
-       TFMT6_ASTC_8x8 = 200,
-       TFMT6_ASTC_10x5 = 201,
-       TFMT6_ASTC_10x6 = 202,
-       TFMT6_ASTC_10x8 = 203,
-       TFMT6_ASTC_10x10 = 204,
-       TFMT6_ASTC_12x10 = 205,
-       TFMT6_ASTC_12x12 = 206,
+enum a6xx_format {
+       FMT6_A8_UNORM = 2,
+       FMT6_8_UNORM = 3,
+       FMT6_8_SNORM = 4,
+       FMT6_8_UINT = 5,
+       FMT6_8_SINT = 6,
+       FMT6_4_4_4_4_UNORM = 8,
+       FMT6_5_5_5_1_UNORM = 10,
+       FMT6_1_5_5_5_UNORM = 12,
+       FMT6_5_6_5_UNORM = 14,
+       FMT6_8_8_UNORM = 15,
+       FMT6_8_8_SNORM = 16,
+       FMT6_8_8_UINT = 17,
+       FMT6_8_8_SINT = 18,
+       FMT6_L8_A8_UNORM = 19,
+       FMT6_16_UNORM = 21,
+       FMT6_16_SNORM = 22,
+       FMT6_16_FLOAT = 23,
+       FMT6_16_UINT = 24,
+       FMT6_16_SINT = 25,
+       FMT6_8_8_8_UNORM = 33,
+       FMT6_8_8_8_SNORM = 34,
+       FMT6_8_8_8_UINT = 35,
+       FMT6_8_8_8_SINT = 36,
+       FMT6_8_8_8_8_UNORM = 48,
+       FMT6_8_8_8_X8_UNORM = 49,
+       FMT6_8_8_8_8_SNORM = 50,
+       FMT6_8_8_8_8_UINT = 51,
+       FMT6_8_8_8_8_SINT = 52,
+       FMT6_9_9_9_E5_FLOAT = 53,
+       FMT6_10_10_10_2_UNORM = 54,
+       FMT6_10_10_10_2_UNORM_DEST = 55,
+       FMT6_10_10_10_2_SNORM = 57,
+       FMT6_10_10_10_2_UINT = 58,
+       FMT6_10_10_10_2_SINT = 59,
+       FMT6_11_11_10_FLOAT = 66,
+       FMT6_16_16_UNORM = 67,
+       FMT6_16_16_SNORM = 68,
+       FMT6_16_16_FLOAT = 69,
+       FMT6_16_16_UINT = 70,
+       FMT6_16_16_SINT = 71,
+       FMT6_32_UNORM = 72,
+       FMT6_32_SNORM = 73,
+       FMT6_32_FLOAT = 74,
+       FMT6_32_UINT = 75,
+       FMT6_32_SINT = 76,
+       FMT6_32_FIXED = 77,
+       FMT6_16_16_16_UNORM = 88,
+       FMT6_16_16_16_SNORM = 89,
+       FMT6_16_16_16_FLOAT = 90,
+       FMT6_16_16_16_UINT = 91,
+       FMT6_16_16_16_SINT = 92,
+       FMT6_16_16_16_16_UNORM = 96,
+       FMT6_16_16_16_16_SNORM = 97,
+       FMT6_16_16_16_16_FLOAT = 98,
+       FMT6_16_16_16_16_UINT = 99,
+       FMT6_16_16_16_16_SINT = 100,
+       FMT6_32_32_UNORM = 101,
+       FMT6_32_32_SNORM = 102,
+       FMT6_32_32_FLOAT = 103,
+       FMT6_32_32_UINT = 104,
+       FMT6_32_32_SINT = 105,
+       FMT6_32_32_FIXED = 106,
+       FMT6_32_32_32_UNORM = 112,
+       FMT6_32_32_32_SNORM = 113,
+       FMT6_32_32_32_UINT = 114,
+       FMT6_32_32_32_SINT = 115,
+       FMT6_32_32_32_FLOAT = 116,
+       FMT6_32_32_32_FIXED = 117,
+       FMT6_32_32_32_32_UNORM = 128,
+       FMT6_32_32_32_32_SNORM = 129,
+       FMT6_32_32_32_32_FLOAT = 130,
+       FMT6_32_32_32_32_UINT = 131,
+       FMT6_32_32_32_32_SINT = 132,
+       FMT6_32_32_32_32_FIXED = 133,
+       FMT6_G8R8B8R8_422_UNORM = 140,
+       FMT6_R8G8R8B8_422_UNORM = 141,
+       FMT6_R8_G8B8_2PLANE_420_UNORM = 142,
+       FMT6_R8_G8_B8_3PLANE_420_UNORM = 144,
+       FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145,
+       FMT6_8_PLANE_UNORM = 148,
+       FMT6_Z24_UNORM_S8_UINT = 160,
+       FMT6_ETC2_RG11_UNORM = 171,
+       FMT6_ETC2_RG11_SNORM = 172,
+       FMT6_ETC2_R11_UNORM = 173,
+       FMT6_ETC2_R11_SNORM = 174,
+       FMT6_ETC1 = 175,
+       FMT6_ETC2_RGB8 = 176,
+       FMT6_ETC2_RGBA8 = 177,
+       FMT6_ETC2_RGB8A1 = 178,
+       FMT6_DXT1 = 179,
+       FMT6_DXT3 = 180,
+       FMT6_DXT5 = 181,
+       FMT6_RGTC1_UNORM = 183,
+       FMT6_RGTC1_SNORM = 184,
+       FMT6_RGTC2_UNORM = 187,
+       FMT6_RGTC2_SNORM = 188,
+       FMT6_BPTC_UFLOAT = 190,
+       FMT6_BPTC_FLOAT = 191,
+       FMT6_BPTC = 192,
+       FMT6_ASTC_4x4 = 193,
+       FMT6_ASTC_5x4 = 194,
+       FMT6_ASTC_5x5 = 195,
+       FMT6_ASTC_6x5 = 196,
+       FMT6_ASTC_6x6 = 197,
+       FMT6_ASTC_8x5 = 198,
+       FMT6_ASTC_8x6 = 199,
+       FMT6_ASTC_8x8 = 200,
+       FMT6_ASTC_10x5 = 201,
+       FMT6_ASTC_10x6 = 202,
+       FMT6_ASTC_10x8 = 203,
+       FMT6_ASTC_10x10 = 204,
+       FMT6_ASTC_12x10 = 205,
+       FMT6_ASTC_12x12 = 206,
+       FMT6_S8Z24_UINT = 234,
+       FMT6_NONE = 255,
 };
 
-enum a6xx_tex_fetchsize {
-       TFETCH6_1_BYTE = 0,
-       TFETCH6_2_BYTE = 1,
-       TFETCH6_4_BYTE = 2,
-       TFETCH6_8_BYTE = 3,
-       TFETCH6_16_BYTE = 4,
+enum a6xx_polygon_mode {
+       POLYMODE6_POINTS = 1,
+       POLYMODE6_LINES = 2,
+       POLYMODE6_TRIANGLES = 3,
 };
 
 enum a6xx_depth_format {
@@ -951,10 +868,50 @@ enum a6xx_cmp_perfcounter_select {
        PERF_CMPDECMP_2D_PIXELS = 39,
 };
 
+enum a6xx_2d_ifmt {
+       R2D_UNORM8 = 16,
+       R2D_INT32 = 7,
+       R2D_INT16 = 6,
+       R2D_INT8 = 5,
+       R2D_FLOAT32 = 4,
+       R2D_FLOAT16 = 3,
+       R2D_UNORM8_SRGB = 1,
+       R2D_RAW = 0,
+};
+
+enum a6xx_ztest_mode {
+       A6XX_EARLY_Z = 0,
+       A6XX_LATE_Z = 1,
+       A6XX_EARLY_LRZ_LATE_Z = 2,
+};
+
+enum a6xx_rotation {
+       ROTATE_0 = 0,
+       ROTATE_90 = 1,
+       ROTATE_180 = 2,
+       ROTATE_270 = 3,
+       ROTATE_HFLIP = 4,
+       ROTATE_VFLIP = 5,
+};
+
+enum a6xx_tess_spacing {
+       TESS_EQUAL = 0,
+       TESS_FRACTIONAL_ODD = 2,
+       TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+       TESS_POINTS = 0,
+       TESS_LINES = 1,
+       TESS_CW_TRIS = 2,
+       TESS_CCW_TRIS = 3,
+};
+
 enum a6xx_tex_filter {
        A6XX_TEX_NEAREST = 0,
        A6XX_TEX_LINEAR = 1,
        A6XX_TEX_ANISO = 2,
+       A6XX_TEX_CUBIC = 3,
 };
 
 enum a6xx_tex_clamp {
@@ -973,6 +930,12 @@ enum a6xx_tex_aniso {
        A6XX_TEX_ANISO_16 = 4,
 };
 
+enum a6xx_reduction_mode {
+       A6XX_REDUCTION_MODE_AVERAGE = 0,
+       A6XX_REDUCTION_MODE_MIN = 1,
+       A6XX_REDUCTION_MODE_MAX = 2,
+};
+
 enum a6xx_tex_swiz {
        A6XX_TEX_X = 0,
        A6XX_TEX_Y = 1,
@@ -1035,6 +998,9 @@ enum a6xx_tex_type {
 
 #define REG_A6XX_CP_SQE_CNTL                                   0x00000808
 
+#define REG_A6XX_CP_CP2GMU_STATUS                              0x00000812
+#define A6XX_CP_CP2GMU_STATUS_IFPC                             0x00000001
+
 #define REG_A6XX_CP_HW_FAULT                                   0x00000821
 
 #define REG_A6XX_CP_INTERRUPT_STATUS                           0x00000823
@@ -1050,8 +1016,44 @@ enum a6xx_tex_type {
 #define REG_A6XX_CP_APRIV_CNTL                                 0x00000844
 
 #define REG_A6XX_CP_ROQ_THRESHOLDS_1                           0x000008c1
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK                   0x000000ff
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT                  0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK                   0x0000ff00
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT                  8
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK               0x00ff0000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT              16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK               0xff000000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT              24
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+}
 
 #define REG_A6XX_CP_ROQ_THRESHOLDS_2                           0x000008c2
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK               0x000001ff
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT              0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK                        0xffff0000
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT               16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
+{
+       return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+}
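
A minimal usage sketch, not part of this patch: each generated FIELD(val) helper masks and shifts its argument, and the per-field results are OR'd together into the 32-bit register value. For the ROQ threshold fields the generator also applies a >>2, so the raw field ends up holding val/4. The helpers above are assumed to be in scope (e.g. via a6xx.xml.h).

static inline uint32_t pack_cp_roq_thresholds_1(uint32_t rb_lo, uint32_t rb_hi,
						uint32_t ib1_start, uint32_t ib2_start)
{
	/* each helper masks/shifts; the generated >>2 means the hardware
	 * field stores the argument divided by four */
	return A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(rb_lo) |
	       A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(rb_hi) |
	       A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(ib1_start) |
	       A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(ib2_start);
}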
 
 #define REG_A6XX_CP_MEM_POOL_SIZE                              0x000008c3
 
@@ -1170,6 +1172,36 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_CP_IB2_REM_SIZE                               0x0000092d
 
+#define REG_A6XX_CP_SDS_BASE                                   0x0000092e
+
+#define REG_A6XX_CP_SDS_BASE_HI                                        0x0000092f
+
+#define REG_A6XX_CP_SDS_REM_SIZE                               0x00000930
+
+#define REG_A6XX_CP_BIN_SIZE_ADDRESS                           0x00000931
+
+#define REG_A6XX_CP_BIN_SIZE_ADDRESS_HI                                0x00000932
+
+#define REG_A6XX_CP_BIN_DATA_ADDR                              0x00000934
+
+#define REG_A6XX_CP_BIN_DATA_ADDR_HI                           0x00000935
+
+#define REG_A6XX_CP_CSQ_IB1_STAT                               0x00000949
+#define A6XX_CP_CSQ_IB1_STAT_REM__MASK                         0xffff0000
+#define A6XX_CP_CSQ_IB1_STAT_REM__SHIFT                                16
+static inline uint32_t A6XX_CP_CSQ_IB1_STAT_REM(uint32_t val)
+{
+       return ((val) << A6XX_CP_CSQ_IB1_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB1_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_CSQ_IB2_STAT                               0x0000094a
+#define A6XX_CP_CSQ_IB2_STAT_REM__MASK                         0xffff0000
+#define A6XX_CP_CSQ_IB2_STAT_REM__SHIFT                                16
+static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
+{
+       return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK;
+}
+
 #define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO                       0x00000980
 
 #define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI                       0x00000981
@@ -1211,6 +1243,7 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 #define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER                 0x00000001
 
 #define REG_A6XX_RBBM_STATUS3                                  0x00000213
+#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT                        0x01000000
 
 #define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS                     0x00000215
 
@@ -1428,18 +1461,6 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_RBBM_PERFCTR_TSE_2_LO                         0x0000046a
 
-#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI                         0x00000465
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO                         0x00000466
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI                         0x00000467
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO                         0x00000468
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI                         0x00000469
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO                         0x0000046a
-
 #define REG_A6XX_RBBM_PERFCTR_TSE_2_HI                         0x0000046b
 
 #define REG_A6XX_RBBM_PERFCTR_TSE_3_LO                         0x0000046c
@@ -1752,6 +1773,50 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_RBBM_ISDB_CNT                                 0x00000533
 
+#define REG_A6XX_RBBM_PRIMCTR_0_LO                             0x00000540
+
+#define REG_A6XX_RBBM_PRIMCTR_0_HI                             0x00000541
+
+#define REG_A6XX_RBBM_PRIMCTR_1_LO                             0x00000542
+
+#define REG_A6XX_RBBM_PRIMCTR_1_HI                             0x00000543
+
+#define REG_A6XX_RBBM_PRIMCTR_2_LO                             0x00000544
+
+#define REG_A6XX_RBBM_PRIMCTR_2_HI                             0x00000545
+
+#define REG_A6XX_RBBM_PRIMCTR_3_LO                             0x00000546
+
+#define REG_A6XX_RBBM_PRIMCTR_3_HI                             0x00000547
+
+#define REG_A6XX_RBBM_PRIMCTR_4_LO                             0x00000548
+
+#define REG_A6XX_RBBM_PRIMCTR_4_HI                             0x00000549
+
+#define REG_A6XX_RBBM_PRIMCTR_5_LO                             0x0000054a
+
+#define REG_A6XX_RBBM_PRIMCTR_5_HI                             0x0000054b
+
+#define REG_A6XX_RBBM_PRIMCTR_6_LO                             0x0000054c
+
+#define REG_A6XX_RBBM_PRIMCTR_6_HI                             0x0000054d
+
+#define REG_A6XX_RBBM_PRIMCTR_7_LO                             0x0000054e
+
+#define REG_A6XX_RBBM_PRIMCTR_7_HI                             0x0000054f
+
+#define REG_A6XX_RBBM_PRIMCTR_8_LO                             0x00000550
+
+#define REG_A6XX_RBBM_PRIMCTR_8_HI                             0x00000551
+
+#define REG_A6XX_RBBM_PRIMCTR_9_LO                             0x00000552
+
+#define REG_A6XX_RBBM_PRIMCTR_9_HI                             0x00000553
+
+#define REG_A6XX_RBBM_PRIMCTR_10_LO                            0x00000554
+
+#define REG_A6XX_RBBM_PRIMCTR_10_HI                            0x00000555
+
 #define REG_A6XX_RBBM_SECVID_TRUST_CNTL                                0x0000f400
 
 #define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO               0x0000f800
@@ -1768,6 +1833,9 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL                     0x00000011
 
+#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD                    0x0000001c
+#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE          0x00000001
+
 #define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL                  0x0000001f
 
 #define REG_A6XX_RBBM_INT_CLEAR_CMD                            0x00000037
@@ -1996,6 +2064,14 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ                         0x0000011c
 
+#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ                          0x0000011d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE                      0x00000120
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE                     0x00000121
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE                      0x00000122
+
 #define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A                         0x00000600
 
 #define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B                         0x00000601
@@ -2168,94 +2244,6 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
 
 #define REG_A6XX_VSC_PERFCTR_VSC_SEL_1                         0x00000cd9
 
-#define REG_A6XX_GRAS_ADDR_MODE_CNTL                           0x00008601
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0                                0x00008610
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1                                0x00008611
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2                                0x00008612
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3                                0x00008613
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0                                0x00008614
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1                                0x00008615
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2                                0x00008616
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3                                0x00008617
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0                                0x00008618
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1                                0x00008619
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2                                0x0000861a
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3                                0x0000861b
-
-#define REG_A6XX_RB_ADDR_MODE_CNTL                             0x00008e05
-
-#define REG_A6XX_RB_NC_MODE_CNTL                               0x00008e08
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_0                           0x00008e10
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_1                           0x00008e11
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_2                           0x00008e12
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_3                           0x00008e13
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_4                           0x00008e14
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_5                           0x00008e15
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_6                           0x00008e16
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_7                           0x00008e17
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_0                          0x00008e18
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_1                          0x00008e19
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_2                          0x00008e1a
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_3                          0x00008e1b
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_4                          0x00008e1c
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_0                          0x00008e2c
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_1                          0x00008e2d
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_2                          0x00008e2e
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_3                          0x00008e2f
-
-#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD                   0x00008e3d
-
-#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE           0x00008e50
-
-#define REG_A6XX_PC_DBG_ECO_CNTL                               0x00009e00
-
-#define REG_A6XX_PC_ADDR_MODE_CNTL                             0x00009e01
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_0                           0x00009e34
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_1                           0x00009e35
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_2                           0x00009e36
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_3                           0x00009e37
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_4                           0x00009e38
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_5                           0x00009e39
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_6                           0x00009e3a
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_7                           0x00009e3b
-
 #define REG_A6XX_HLSQ_ADDR_MODE_CNTL                           0x0000be05
 
 #define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0                       0x0000be10
@@ -2292,20 +2280,6 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
 
 #define REG_A6XX_VFD_PERFCTR_VFD_SEL_7                         0x0000a617
 
-#define REG_A6XX_VPC_ADDR_MODE_CNTL                            0x00009601
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0                         0x00009604
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1                         0x00009605
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2                         0x00009606
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3                         0x00009607
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4                         0x00009608
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5                         0x00009609
-
 #define REG_A6XX_UCHE_ADDR_MODE_CNTL                           0x00000e00
 
 #define REG_A6XX_UCHE_MODE_CNTL                                        0x00000e01
@@ -2581,21 +2555,6 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
 
 #define REG_A6XX_GBIF_PWR_CNT_HIGH2                            0x00003cd1
 
-#define REG_A6XX_RB_WINDOW_OFFSET2                             0x000088d4
-#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE           0x80000000
-#define A6XX_RB_WINDOW_OFFSET2_X__MASK                         0x00007fff
-#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT                                0
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
-{
-       return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
-}
-#define A6XX_RB_WINDOW_OFFSET2_Y__MASK                         0x7fff0000
-#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT                                16
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
-{
-       return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
-}
-
 #define REG_A6XX_SP_WINDOW_OFFSET                              0x0000b4d1
 #define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE            0x80000000
 #define A6XX_SP_WINDOW_OFFSET_X__MASK                          0x00007fff
@@ -2626,36 +2585,6 @@ static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
        return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_BIN_CONTROL                              0x000080a1
-#define A6XX_GRAS_BIN_CONTROL_BINW__MASK                       0x000000ff
-#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT                      0
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
-{
-       return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_BINH__MASK                       0x0001ff00
-#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT                      8
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
-{
-       return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS                     0x00040000
-#define A6XX_GRAS_BIN_CONTROL_USE_VIZ                          0x00200000
-
-#define REG_A6XX_RB_BIN_CONTROL2                               0x000088d3
-#define A6XX_RB_BIN_CONTROL2_BINW__MASK                                0x000000ff
-#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT                       0
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
-{
-       return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
-}
-#define A6XX_RB_BIN_CONTROL2_BINH__MASK                                0x0001ff00
-#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT                       8
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
-{
-       return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
-}
-
 #define REG_A6XX_VSC_BIN_SIZE                                  0x00000c02
 #define A6XX_VSC_BIN_SIZE_WIDTH__MASK                          0x000000ff
 #define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT                         0
@@ -2670,9 +2599,11 @@ static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
        return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
 }
 
-#define REG_A6XX_VSC_SIZE_ADDRESS_LO                           0x00000c03
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_LO                 0x00000c03
+
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_HI                 0x00000c04
 
-#define REG_A6XX_VSC_SIZE_ADDRESS_HI                           0x00000c04
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS                    0x00000c03
 
 #define REG_A6XX_VSC_BIN_COUNT                                 0x00000c06
 #define A6XX_VSC_BIN_COUNT_NX__MASK                            0x000007fe
@@ -2716,114 +2647,188 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
        return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
 }
 
-#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO                     0x00000c30
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_LO                      0x00000c30
 
-#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI                     0x00000c31
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_HI                      0x00000c31
 
-#define REG_A6XX_VSC_PIPE_DATA2_PITCH                          0x00000c32
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS                         0x00000c30
 
-#define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH                    0x00000c33
-#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK                  0xffffffff
-#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT                 0
-static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val)
-{
-       return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK;
-}
+#define REG_A6XX_VSC_PRIM_STRM_PITCH                           0x00000c32
 
-#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO                      0x00000c34
+#define REG_A6XX_VSC_PRIM_STRM_LIMIT                           0x00000c33
 
-#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI                      0x00000c35
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_LO                      0x00000c34
 
-#define REG_A6XX_VSC_PIPE_DATA_PITCH                           0x00000c36
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_HI                      0x00000c35
 
-#define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH                     0x00000c37
-#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK                   0xffffffff
-#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT                  0
-static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val)
-{
-       return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK;
-}
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS                         0x00000c34
+
+#define REG_A6XX_VSC_DRAW_STRM_PITCH                           0x00000c36
+
+#define REG_A6XX_VSC_DRAW_STRM_LIMIT                           0x00000c37
+
+static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
 
-static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
 
-static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
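
A hedged sketch of the indexed-register pattern, not taken from this patch: the REG_A6XX_VSC_*(i0) inlines return the MMIO offset of array element i0 (stride of one dword here). gpu_read() and struct msm_gpu are the msm driver's usual MMIO accessors and are assumed to be available via msm_gpu.h.

static inline uint32_t a6xx_read_draw_strm_size(struct msm_gpu *gpu, int pipe)
{
	/* per-pipe draw-stream size register: 0x0c78 + pipe */
	return gpu_read(gpu, REG_A6XX_VSC_DRAW_STRM_SIZE_REG(pipe));
}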
 
 #define REG_A6XX_UCHE_UNKNOWN_0E12                             0x00000e12
 
-#define REG_A6XX_GRAS_UNKNOWN_8000                             0x00008000
+#define REG_A6XX_GRAS_CL_CNTL                                  0x00008000
+#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE                         0x00000001
+#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE                   0x00000002
+#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE                    0x00000004
+#define A6XX_GRAS_CL_CNTL_UNK5                                 0x00000020
+#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z                      0x00000040
+#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE                  0x00000080
+#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE                     0x00000100
+#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE               0x00000200
+
+#define REG_A6XX_GRAS_VS_CL_CNTL                               0x00008001
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK                   0x000000ff
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT                  0
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK                   0x0000ff00
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT                  8
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_DS_CL_CNTL                               0x00008002
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK                   0x000000ff
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT                  0
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK                   0x0000ff00
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT                  8
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
+}
 
-#define REG_A6XX_GRAS_UNKNOWN_8001                             0x00008001
+#define REG_A6XX_GRAS_GS_CL_CNTL                               0x00008003
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK                   0x000000ff
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT                  0
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK                   0x0000ff00
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT                  8
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
+}
 
-#define REG_A6XX_GRAS_UNKNOWN_8004                             0x00008004
+#define REG_A6XX_GRAS_MAX_LAYER_INDEX                          0x00008004
 
 #define REG_A6XX_GRAS_CNTL                                     0x00008005
-#define A6XX_GRAS_CNTL_VARYING                                 0x00000001
-#define A6XX_GRAS_CNTL_UNK3                                    0x00000008
-#define A6XX_GRAS_CNTL_XCOORD                                  0x00000040
-#define A6XX_GRAS_CNTL_YCOORD                                  0x00000080
-#define A6XX_GRAS_CNTL_ZCOORD                                  0x00000100
-#define A6XX_GRAS_CNTL_WCOORD                                  0x00000200
+#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL                          0x00000001
+#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID                       0x00000002
+#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE                         0x00000004
+#define A6XX_GRAS_CNTL_SIZE                                    0x00000008
+#define A6XX_GRAS_CNTL_UNK4                                    0x00000010
+#define A6XX_GRAS_CNTL_SIZE_PERSAMP                            0x00000020
+#define A6XX_GRAS_CNTL_COORD_MASK__MASK                                0x000003c0
+#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT                       6
+static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
+}
 
 #define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ                    0x00008006
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK             0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK             0x000001ff
 #define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT            0
 static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
 {
        return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
 }
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK             0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK             0x0007fc00
 #define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT            10
 static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
 {
        return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0                       0x00008010
-#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK                     0xffffffff
-#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT                    0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK                       0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT                      0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XSCALE__MASK                                0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT                       0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0                                0x00008011
-#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK                      0xffffffff
-#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT                     0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK                       0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT                      0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0                       0x00008012
-#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK                     0xffffffff
-#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT                    0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YSCALE__MASK                                0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT                       0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0                                0x00008013
-#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK                      0xffffffff
-#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT                     0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK                       0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT                      0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0                       0x00008014
-#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK                     0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT                    0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK                                0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT                       0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+       return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK                         0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT                                0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
 }
 
-#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0                                0x00008015
-#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK                      0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT                     0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK                         0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT                                0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
 {
-       return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+       return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
 }
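
Sketch only, assuming the helpers above plus the driver's fui() float-bits helper are in scope: the REG_*(i0) inlines address one of the per-viewport register groups (stride 0x6 for the transform, 0x2 for the Z clamp), and the float helpers reinterpret the IEEE-754 bits as a u32. Floating point is normally a user-space concern (Mesa's freedreno consumes the same generated definitions), so read this as a host-side illustration.

struct a6xx_vp0_regs {
	uint32_t xoffset_reg, xoffset_val;
	uint32_t zclamp_min_reg, zclamp_min_val;
};

static inline struct a6xx_vp0_regs pack_viewport0(float x, float width, float near_z)
{
	struct a6xx_vp0_regs r = {
		.xoffset_reg    = REG_A6XX_GRAS_CL_VPORT_XOFFSET(0),	/* 0x8010 */
		.xoffset_val    = A6XX_GRAS_CL_VPORT_XOFFSET(x + width / 2.0f),
		.zclamp_min_reg = REG_A6XX_GRAS_CL_Z_CLAMP_MIN(0),	/* 0x8070 */
		.zclamp_min_val = A6XX_GRAS_CL_Z_CLAMP_MIN(near_z),
	};
	return r;
}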
 
 #define REG_A6XX_GRAS_SU_CNTL                                  0x00008090
@@ -2837,7 +2842,19 @@ static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
        return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
 }
 #define A6XX_GRAS_SU_CNTL_POLY_OFFSET                          0x00000800
+#define A6XX_GRAS_SU_CNTL_UNK12__MASK                          0x00001000
+#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT                         12
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
+}
 #define A6XX_GRAS_SU_CNTL_MSAA_ENABLE                          0x00002000
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK                          0x007f8000
+#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT                         15
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
+}
 
 #define REG_A6XX_GRAS_SU_POINT_MINMAX                          0x00008091
 #define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK                    0x0000ffff
@@ -2854,7 +2871,7 @@ static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 }
 
 #define REG_A6XX_GRAS_SU_POINT_SIZE                            0x00008092
-#define A6XX_GRAS_SU_POINT_SIZE__MASK                          0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__MASK                          0x0000ffff
 #define A6XX_GRAS_SU_POINT_SIZE__SHIFT                         0
 static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
 {
@@ -2862,7 +2879,12 @@ static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
 }
 
 #define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL                      0x00008094
-#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z            0x00000001
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK             0x00000003
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT            0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+       return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
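
Sketch, not from this patch: the old single FRAG_WRITES_Z bit becomes a two-bit Z_MODE field that takes the enum a6xx_ztest_mode added above; late Z is the conservative choice when the fragment shader writes depth.

static inline uint32_t a6xx_depth_plane_cntl(bool frag_writes_z)
{
	/* assumes bool/stdbool and the Z_MODE helper above are in scope */
	enum a6xx_ztest_mode mode = frag_writes_z ? A6XX_LATE_Z : A6XX_EARLY_Z;

	return A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(mode);
}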
 
 #define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE                     0x00008095
 #define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK                   0xffffffff
@@ -2895,13 +2917,65 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep
 {
        return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
 }
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK              0x00000008
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT             3
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
 
 #define REG_A6XX_GRAS_UNKNOWN_8099                             0x00008099
 
-#define REG_A6XX_GRAS_UNKNOWN_809B                             0x0000809b
+#define REG_A6XX_GRAS_UNKNOWN_809A                             0x0000809a
+
+#define REG_A6XX_GRAS_VS_LAYER_CNTL                            0x0000809b
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER                   0x00000001
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW                    0x00000002
+
+#define REG_A6XX_GRAS_GS_LAYER_CNTL                            0x0000809c
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER                   0x00000001
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW                    0x00000002
+
+#define REG_A6XX_GRAS_DS_LAYER_CNTL                            0x0000809d
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER                   0x00000001
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW                    0x00000002
 
 #define REG_A6XX_GRAS_UNKNOWN_80A0                             0x000080a0
 
+#define REG_A6XX_GRAS_BIN_CONTROL                              0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK                       0x0000003f
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT                      0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
+{
+       return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK                       0x00007f00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT                      8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
+{
+       return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS                     0x00040000
+#define A6XX_GRAS_BIN_CONTROL_UNK19__MASK                      0x00080000
+#define A6XX_GRAS_BIN_CONTROL_UNK19__SHIFT                     19
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK19(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_BIN_CONTROL_UNK19__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK19__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_UNK20__MASK                      0x00100000
+#define A6XX_GRAS_BIN_CONTROL_UNK20__SHIFT                     20
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK20(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_BIN_CONTROL_UNK20__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK20__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_USE_VIZ                          0x00200000
+#define A6XX_GRAS_BIN_CONTROL_UNK22__MASK                      0x0fc00000
+#define A6XX_GRAS_BIN_CONTROL_UNK22__SHIFT                     22
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK22(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_BIN_CONTROL_UNK22__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK22__MASK;
+}
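
Usage sketch, assuming the BIN_CONTROL helpers above: BINW and BINH take the bin size in pixels and pre-shift it (>>5 and >>4), so the stored fields count 32x16-pixel blocks and the pixel dimensions should be multiples of 32 and 16 respectively.

static inline uint32_t a6xx_gras_bin_control(uint32_t bin_w, uint32_t bin_h,
					     bool binning_pass)
{
	uint32_t v = A6XX_GRAS_BIN_CONTROL_BINW(bin_w) |	/* stored as bin_w/32 */
		     A6XX_GRAS_BIN_CONTROL_BINH(bin_h);		/* stored as bin_h/16 */

	if (binning_pass)
		v |= A6XX_GRAS_BIN_CONTROL_BINNING_PASS;

	return v;
}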
+
 #define REG_A6XX_GRAS_RAS_MSAA_CNTL                            0x000080a2
 #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK                  0x00000003
 #define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT                 0
@@ -2909,6 +2983,18 @@ static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples va
 {
        return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
 }
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK                     0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT                    2
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK                     0x00000008
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT                    3
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
+}
 
 #define REG_A6XX_GRAS_DEST_MSAA_CNTL                           0x000080a3
 #define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK                 0x00000003
@@ -2919,83 +3005,180 @@ static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples v
 }
 #define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE                  0x00000004
 
-#define REG_A6XX_GRAS_UNKNOWN_80A4                             0x000080a4
+#define REG_A6XX_GRAS_SAMPLE_CONFIG                            0x000080a4
+#define A6XX_GRAS_SAMPLE_CONFIG_UNK0                           0x00000001
+#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE                        0x00000002
 
-#define REG_A6XX_GRAS_UNKNOWN_80A5                             0x000080a5
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_0                                0x000080a5
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK           0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT          0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK           0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT          4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK           0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT          8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK           0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT          12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK           0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT          16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK           0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT          20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK           0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT          24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK           0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT          28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
 
-#define REG_A6XX_GRAS_UNKNOWN_80A6                             0x000080a6
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_1                                0x000080a6
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK           0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT          0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK           0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT          4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK           0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT          8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK           0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT          12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK           0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT          16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK           0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT          20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK           0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT          24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK           0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT          28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
 
 #define REG_A6XX_GRAS_UNKNOWN_80AF                             0x000080af
 
-#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0                   0x000080b0
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK               0x00007fff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT              0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK                 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT                        0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
 }
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK               0x7fff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT              16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK                 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT                        16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0                   0x000080b1
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK               0x00007fff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT              0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK                 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT                        0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
 }
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK               0x7fff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT              16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK                 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT                        16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+       return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0                 0x000080d0
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE       0x80000000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK             0x00007fff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT            0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK               0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT              0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK;
 }
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK             0x7fff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT            16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK               0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT              16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0                 0x000080d1
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE       0x80000000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK             0x00007fff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT            0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK               0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT              0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK;
 }
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK             0x7fff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT            16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK               0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT              16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+       return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK;
 }
 
 #define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL                     0x000080f0
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE   0x80000000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK                 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK                 0x00003fff
 #define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT                        0
 static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
 {
        return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
 }
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK                 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK                 0x3fff0000
 #define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT                        16
 static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
 {
@@ -3003,14 +3186,13 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
 }
 
 #define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR                     0x000080f1
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE   0x80000000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK                 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK                 0x00003fff
 #define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT                        0
 static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
 {
        return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
 }
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK                 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK                 0x3fff0000
 #define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT                        16
 static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
 {
@@ -3021,15 +3203,21 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
 #define A6XX_GRAS_LRZ_CNTL_ENABLE                              0x00000001
 #define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE                           0x00000002
 #define A6XX_GRAS_LRZ_CNTL_GREATER                             0x00000004
-#define A6XX_GRAS_LRZ_CNTL_UNK3                                        0x00000008
-#define A6XX_GRAS_LRZ_CNTL_UNK4                                        0x00000010
+#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE                           0x00000008
+#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE                       0x00000010
+#define A6XX_GRAS_LRZ_CNTL_UNK5__MASK                          0x000003e0
+#define A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT                         5
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK5(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK5__MASK;
+}
 
 #define REG_A6XX_GRAS_UNKNOWN_8101                             0x00008101
 
 #define REG_A6XX_GRAS_2D_BLIT_INFO                             0x00008102
 #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK              0x000000ff
 #define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT             0
-static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
 }
@@ -3038,78 +3226,128 @@ static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt v
 
 #define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI                       0x00008104
 
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE                          0x00008103
+#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK                                0xffffffff
+#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT                       0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK;
+}
+
 #define REG_A6XX_GRAS_LRZ_BUFFER_PITCH                         0x00008105
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK                 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK                 0x000000ff
 #define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT                        0
 static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
 {
        return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
 }
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK           0x003ff800
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT          11
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK           0x1ffffc00
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT          10
 static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
 {
-       return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+       return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
 }
 
 #define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO            0x00008106
 
 #define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI            0x00008107
 
-#define REG_A6XX_GRAS_UNKNOWN_8109                             0x00008109
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE               0x00008106
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK             0xffffffff
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT            0
+static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_CNTL                              0x00008109
+#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE                    0x00000001
+
+#define REG_A6XX_GRAS_UNKNOWN_810A                             0x0000810a
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__MASK                      0x000007ff
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT                     0
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK0(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK0__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__MASK                     0x07ff0000
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT                    16
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK16(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK16__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__MASK                     0xf0000000
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT                    28
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK28(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK28__MASK;
+}
 
 #define REG_A6XX_GRAS_UNKNOWN_8110                             0x00008110
 
 #define REG_A6XX_GRAS_2D_BLIT_CNTL                             0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK                    0x00000007
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT                   0
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK3__MASK                      0x00000078
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK3__SHIFT                     3
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK3__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK3__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR                     0x00000080
 #define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK              0x0000ff00
 #define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT             8
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
 }
 #define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR                         0x00010000
-
-#define REG_A6XX_GRAS_2D_SRC_TL_X                              0x00008401
-#define A6XX_GRAS_2D_SRC_TL_X_X__MASK                          0x00ffff00
-#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT                         8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK                     0x00060000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT                    17
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK;
 }
-
-#define REG_A6XX_GRAS_2D_SRC_BR_X                              0x00008402
-#define A6XX_GRAS_2D_SRC_BR_X_X__MASK                          0x00ffff00
-#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT                         8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_D24S8                           0x00080000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK                      0x00f00000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT                     20
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK;
 }
-
-#define REG_A6XX_GRAS_2D_SRC_TL_Y                              0x00008403
-#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK                          0x00ffff00
-#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT                         8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK                      0x1f000000
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT                     24
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
 {
-       return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK;
 }
-
-#define REG_A6XX_GRAS_2D_SRC_BR_Y                              0x00008404
-#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK                          0x00ffff00
-#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT                         8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK29__MASK                     0x20000000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK29__SHIFT                    29
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK29(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK29__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK29__MASK;
 }
 
+#define REG_A6XX_GRAS_2D_SRC_TL_X                              0x00008401
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X                              0x00008402
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y                              0x00008403
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y                              0x00008404
+
 #define REG_A6XX_GRAS_2D_DST_TL                                        0x00008405
-#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE              0x80000000
-#define A6XX_GRAS_2D_DST_TL_X__MASK                            0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__MASK                            0x00003fff
 #define A6XX_GRAS_2D_DST_TL_X__SHIFT                           0
 static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
 {
        return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
 }
-#define A6XX_GRAS_2D_DST_TL_Y__MASK                            0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__MASK                            0x3fff0000
 #define A6XX_GRAS_2D_DST_TL_Y__SHIFT                           16
 static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
 {
@@ -3117,71 +3355,131 @@ static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
 }
 
 #define REG_A6XX_GRAS_2D_DST_BR                                        0x00008406
-#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE              0x80000000
-#define A6XX_GRAS_2D_DST_BR_X__MASK                            0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__MASK                            0x00003fff
 #define A6XX_GRAS_2D_DST_BR_X__SHIFT                           0
 static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
 {
        return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
 }
-#define A6XX_GRAS_2D_DST_BR_Y__MASK                            0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__MASK                            0x3fff0000
 #define A6XX_GRAS_2D_DST_BR_Y__SHIFT                           16
 static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
 {
        return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_RESOLVE_CNTL_1                           0x0000840a
-#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE         0x80000000
-#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK                       0x00007fff
-#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT                      0
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+#define REG_A6XX_GRAS_2D_UNKNOWN_8407                          0x00008407
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8408                          0x00008408
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8409                          0x00008409
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1                                0x0000840a
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK                    0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT                   0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+       return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK;
 }
-#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK                       0x7fff0000
-#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT                      16
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK                    0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT                   16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+       return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK;
 }
 
-#define REG_A6XX_GRAS_RESOLVE_CNTL_2                           0x0000840b
-#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE         0x80000000
-#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK                       0x00007fff
-#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT                      0
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2                                0x0000840b
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK                    0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT                   0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+       return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK;
 }
-#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK                       0x7fff0000
-#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT                      16
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK                    0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT                   16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
 {
-       return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+       return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK;
 }
 
 #define REG_A6XX_GRAS_UNKNOWN_8600                             0x00008600
 
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL                           0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0                                0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1                                0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2                                0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3                                0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0                                0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1                                0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2                                0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3                                0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0                                0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1                                0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2                                0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3                                0x0000861b
+
 #define REG_A6XX_RB_BIN_CONTROL                                        0x00008800
-#define A6XX_RB_BIN_CONTROL_BINW__MASK                         0x000000ff
+#define A6XX_RB_BIN_CONTROL_BINW__MASK                         0x0000003f
 #define A6XX_RB_BIN_CONTROL_BINW__SHIFT                                0
 static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
 {
        return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
 }
-#define A6XX_RB_BIN_CONTROL_BINH__MASK                         0x0001ff00
+#define A6XX_RB_BIN_CONTROL_BINH__MASK                         0x00007f00
 #define A6XX_RB_BIN_CONTROL_BINH__SHIFT                                8
 static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
 {
        return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
 }
 #define A6XX_RB_BIN_CONTROL_BINNING_PASS                       0x00040000
+#define A6XX_RB_BIN_CONTROL_UNK19__MASK                                0x00080000
+#define A6XX_RB_BIN_CONTROL_UNK19__SHIFT                       19
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK19(uint32_t val)
+{
+       return ((val) << A6XX_RB_BIN_CONTROL_UNK19__SHIFT) & A6XX_RB_BIN_CONTROL_UNK19__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_UNK20__MASK                                0x00100000
+#define A6XX_RB_BIN_CONTROL_UNK20__SHIFT                       20
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK20(uint32_t val)
+{
+       return ((val) << A6XX_RB_BIN_CONTROL_UNK20__SHIFT) & A6XX_RB_BIN_CONTROL_UNK20__MASK;
+}
 #define A6XX_RB_BIN_CONTROL_USE_VIZ                            0x00200000
+#define A6XX_RB_BIN_CONTROL_UNK22__MASK                                0x07c00000
+#define A6XX_RB_BIN_CONTROL_UNK22__SHIFT                       22
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK22(uint32_t val)
+{
+       return ((val) << A6XX_RB_BIN_CONTROL_UNK22__SHIFT) & A6XX_RB_BIN_CONTROL_UNK22__MASK;
+}
 
 #define REG_A6XX_RB_RENDER_CNTL                                        0x00008801
+#define A6XX_RB_RENDER_CNTL_UNK3                               0x00000008
 #define A6XX_RB_RENDER_CNTL_UNK4                               0x00000010
+#define A6XX_RB_RENDER_CNTL_UNK5__MASK                         0x00000060
+#define A6XX_RB_RENDER_CNTL_UNK5__SHIFT                                5
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK5(uint32_t val)
+{
+       return ((val) << A6XX_RB_RENDER_CNTL_UNK5__SHIFT) & A6XX_RB_RENDER_CNTL_UNK5__MASK;
+}
 #define A6XX_RB_RENDER_CNTL_BINNING                            0x00000080
+#define A6XX_RB_RENDER_CNTL_UNK8__MASK                         0x00001f00
+#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT                                8
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val)
+{
+       return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK;
+}
 #define A6XX_RB_RENDER_CNTL_FLAG_DEPTH                         0x00004000
 #define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK                    0x00ff0000
 #define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT                   16
@@ -3197,6 +3495,18 @@ static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
 {
        return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
 }
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK                       0x00000004
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT                      2
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+       return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK                       0x00000008
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT                      3
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK;
+}
 
 #define REG_A6XX_RB_DEST_MSAA_CNTL                             0x00008803
 #define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK                   0x00000003
@@ -3207,28 +3517,141 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
 }
 #define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE                    0x00000004
 
-#define REG_A6XX_RB_UNKNOWN_8804                               0x00008804
+#define REG_A6XX_RB_SAMPLE_CONFIG                              0x00008804
+#define A6XX_RB_SAMPLE_CONFIG_UNK0                             0x00000001
+#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE                  0x00000002
 
-#define REG_A6XX_RB_UNKNOWN_8805                               0x00008805
+#define REG_A6XX_RB_SAMPLE_LOCATION_0                          0x00008805
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK             0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT            0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK             0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT            4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK             0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT            8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK             0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT            12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK             0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT            16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK             0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT            20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK             0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT            24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK             0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT            28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
 
-#define REG_A6XX_RB_UNKNOWN_8806                               0x00008806
+#define REG_A6XX_RB_SAMPLE_LOCATION_1                          0x00008806
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK             0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT            0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK             0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT            4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK             0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT            8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK             0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT            12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK             0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT            16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK             0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT            20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK             0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT            24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK             0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT            28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
 
 #define REG_A6XX_RB_RENDER_CONTROL0                            0x00008809
-#define A6XX_RB_RENDER_CONTROL0_VARYING                                0x00000001
-#define A6XX_RB_RENDER_CONTROL0_UNK3                           0x00000008
-#define A6XX_RB_RENDER_CONTROL0_XCOORD                         0x00000040
-#define A6XX_RB_RENDER_CONTROL0_YCOORD                         0x00000080
-#define A6XX_RB_RENDER_CONTROL0_ZCOORD                         0x00000100
-#define A6XX_RB_RENDER_CONTROL0_WCOORD                         0x00000200
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL                 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID              0x00000002
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE                        0x00000004
+#define A6XX_RB_RENDER_CONTROL0_SIZE                           0x00000008
+#define A6XX_RB_RENDER_CONTROL0_UNK4                           0x00000010
+#define A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP                   0x00000020
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK               0x000003c0
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT              6
+static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+       return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
 #define A6XX_RB_RENDER_CONTROL0_UNK10                          0x00000400
 
 #define REG_A6XX_RB_RENDER_CONTROL1                            0x0000880a
 #define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK                     0x00000001
-#define A6XX_RB_RENDER_CONTROL1_FACENESS                       0x00000002
+#define A6XX_RB_RENDER_CONTROL1_UNK1                           0x00000002
+#define A6XX_RB_RENDER_CONTROL1_FACENESS                       0x00000004
 #define A6XX_RB_RENDER_CONTROL1_SAMPLEID                       0x00000008
+#define A6XX_RB_RENDER_CONTROL1_UNK4                           0x00000010
+#define A6XX_RB_RENDER_CONTROL1_UNK5                           0x00000020
+#define A6XX_RB_RENDER_CONTROL1_SIZE                           0x00000040
+#define A6XX_RB_RENDER_CONTROL1_UNK7                           0x00000080
+#define A6XX_RB_RENDER_CONTROL1_UNK8                           0x00000100
 
 #define REG_A6XX_RB_FS_OUTPUT_CNTL0                            0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE           0x00000001
 #define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z                  0x00000002
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK           0x00000004
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF         0x00000008
 
 #define REG_A6XX_RB_FS_OUTPUT_CNTL1                            0x0000880c
 #define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK                      0x0000000f
@@ -3348,7 +3771,8 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
 #define A6XX_RB_SRGB_CNTL_SRGB_MRT6                            0x00000040
 #define A6XX_RB_SRGB_CNTL_SRGB_MRT7                            0x00000080
 
-#define REG_A6XX_RB_UNKNOWN_8810                               0x00008810
+#define REG_A6XX_RB_SAMPLE_CNTL                                        0x00008810
+#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE                      0x00000001
 
 #define REG_A6XX_RB_UNKNOWN_8811                               0x00008811
 
@@ -3426,7 +3850,7 @@ static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
 static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
 #define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK                        0x000000ff
 #define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT               0
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
 }
@@ -3436,6 +3860,12 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode
 {
        return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
 }
+#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK                       0x00000400
+#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT                      10
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val)
+{
+       return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK;
+}
 #define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK                  0x00006000
 #define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT                 13
 static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3444,7 +3874,7 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
 }
 
 static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
-#define A6XX_RB_MRT_PITCH__MASK                                        0xffffffff
+#define A6XX_RB_MRT_PITCH__MASK                                        0x0000ffff
 #define A6XX_RB_MRT_PITCH__SHIFT                               0
 static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
 {
@@ -3452,7 +3882,7 @@ static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
 }
 
 static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
-#define A6XX_RB_MRT_ARRAY_PITCH__MASK                          0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK                          0x1fffffff
 #define A6XX_RB_MRT_ARRAY_PITCH__SHIFT                         0
 static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
 {
@@ -3463,7 +3893,21 @@ static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825
 
 static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
 
+static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE__MASK                                 0xffffffff
+#define A6XX_RB_MRT_BASE__SHIFT                                        0
+static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val)
+{
+       return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK;
+}
+
 static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE_GMEM__MASK                            0xfffff000
+#define A6XX_RB_MRT_BASE_GMEM__SHIFT                           12
+static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val)
+{
+       return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK;
+}
 
 #define REG_A6XX_RB_BLEND_RED_F32                              0x00008860
 #define A6XX_RB_BLEND_RED_F32__MASK                            0xffffffff
@@ -3520,7 +3964,9 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
        return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
 }
 #define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND                   0x00000100
+#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE                        0x00000200
 #define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE                   0x00000400
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE                                0x00000800
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK                   0xffff0000
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT                  16
 static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3529,7 +3975,12 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
 }
 
 #define REG_A6XX_RB_DEPTH_PLANE_CNTL                           0x00008870
-#define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z                 0x00000001
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK                  0x00000003
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT                 0
+static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+       return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
 
 #define REG_A6XX_RB_DEPTH_CNTL                                 0x00008871
 #define A6XX_RB_DEPTH_CNTL_Z_ENABLE                            0x00000001
@@ -3540,7 +3991,9 @@ static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
 {
        return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
 }
+#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE                      0x00000020
 #define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE                       0x00000040
+#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE                     0x00000080
 
 #define REG_A6XX_RB_DEPTH_BUFFER_INFO                          0x00008872
 #define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK           0x00000007
@@ -3549,9 +4002,15 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_fo
 {
        return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
 }
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK                   0x00000018
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT                  3
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
 
 #define REG_A6XX_RB_DEPTH_BUFFER_PITCH                         0x00008873
-#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK                       0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK                       0x00003fff
 #define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT                      0
 static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
 {
@@ -3559,7 +4018,7 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
 }
 
 #define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH                   0x00008874
-#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK                 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK                 0x0fffffff
 #define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT                        0
 static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
 {
@@ -3570,11 +4029,37 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
 
 #define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI                       0x00008876
 
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE                          0x00008875
+#define A6XX_RB_DEPTH_BUFFER_BASE__MASK                                0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT                       0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK;
+}
+
 #define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM                     0x00008877
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK                   0xfffff000
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT                  12
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val)
+{
+       return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK;
+}
 
-#define REG_A6XX_RB_UNKNOWN_8878                               0x00008878
+#define REG_A6XX_RB_Z_BOUNDS_MIN                               0x00008878
+#define A6XX_RB_Z_BOUNDS_MIN__MASK                             0xffffffff
+#define A6XX_RB_Z_BOUNDS_MIN__SHIFT                            0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val)
+{
+       return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK;
+}
 
-#define REG_A6XX_RB_UNKNOWN_8879                               0x00008879
+#define REG_A6XX_RB_Z_BOUNDS_MAX                               0x00008879
+#define A6XX_RB_Z_BOUNDS_MAX__MASK                             0xffffffff
+#define A6XX_RB_Z_BOUNDS_MAX__SHIFT                            0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val)
+{
+       return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK;
+}
 
 #define REG_A6XX_RB_STENCIL_CONTROL                            0x00008880
 #define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE                 0x00000001
@@ -3631,9 +4116,10 @@ static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
 
 #define REG_A6XX_RB_STENCIL_INFO                               0x00008881
 #define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL                  0x00000001
+#define A6XX_RB_STENCIL_INFO_UNK1                              0x00000002
 
 #define REG_A6XX_RB_STENCIL_BUFFER_PITCH                       0x00008882
-#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK                     0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK                     0x00000fff
 #define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT                    0
 static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
 {
@@ -3641,7 +4127,7 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
 }
 
 #define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH                 0x00008883
-#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK               0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK               0x00ffffff
 #define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT              0
 static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
 {
@@ -3652,7 +4138,21 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
 
 #define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI                     0x00008885
 
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE                                0x00008884
+#define A6XX_RB_STENCIL_BUFFER_BASE__MASK                      0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT                     0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK;
+}
+
 #define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM                   0x00008886
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK                 0xfffff000
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT                        12
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val)
+{
+       return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK;
+}
 
 #define REG_A6XX_RB_STENCILREF                                 0x00008887
 #define A6XX_RB_STENCILREF_REF__MASK                           0x000000ff
@@ -3697,14 +4197,13 @@ static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
 }
 
 #define REG_A6XX_RB_WINDOW_OFFSET                              0x00008890
-#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE            0x80000000
-#define A6XX_RB_WINDOW_OFFSET_X__MASK                          0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__MASK                          0x00003fff
 #define A6XX_RB_WINDOW_OFFSET_X__SHIFT                         0
 static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
 {
        return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
 }
-#define A6XX_RB_WINDOW_OFFSET_Y__MASK                          0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK                          0x3fff0000
 #define A6XX_RB_WINDOW_OFFSET_Y__SHIFT                         16
 static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 {
@@ -3712,22 +4211,50 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 }
 
 #define REG_A6XX_RB_SAMPLE_COUNT_CONTROL                       0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_UNK0                      0x00000001
 #define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY                      0x00000002
 
 #define REG_A6XX_RB_LRZ_CNTL                                   0x00008898
 #define A6XX_RB_LRZ_CNTL_ENABLE                                        0x00000001
 
+#define REG_A6XX_RB_Z_CLAMP_MIN                                        0x000088c0
+#define A6XX_RB_Z_CLAMP_MIN__MASK                              0xffffffff
+#define A6XX_RB_Z_CLAMP_MIN__SHIFT                             0
+static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val)
+{
+       return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_CLAMP_MAX                                        0x000088c1
+#define A6XX_RB_Z_CLAMP_MAX__MASK                              0xffffffff
+#define A6XX_RB_Z_CLAMP_MAX__SHIFT                             0
+static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val)
+{
+       return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK;
+}
+
 #define REG_A6XX_RB_UNKNOWN_88D0                               0x000088d0
+#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK                                0x00001fff
+#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT                       0
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val)
+{
+       return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK;
+}
+#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK                       0x07ff0000
+#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT                      16
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val)
+{
+       return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK;
+}
 
 #define REG_A6XX_RB_BLIT_SCISSOR_TL                            0x000088d1
-#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE          0x80000000
-#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK                                0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK                                0x00003fff
 #define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT                       0
 static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
 {
        return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
 }
-#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK                                0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK                                0x3fff0000
 #define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT                       16
 static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
 {
@@ -3735,20 +4262,47 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
 }
 
 #define REG_A6XX_RB_BLIT_SCISSOR_BR                            0x000088d2
-#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE          0x80000000
-#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK                                0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK                                0x00003fff
 #define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT                       0
 static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
 {
        return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
 }
-#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK                                0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK                                0x3fff0000
 #define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT                       16
 static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
 {
        return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
 }
 
+#define REG_A6XX_RB_BIN_CONTROL2                               0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK                                0x0000003f
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT                       0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
+{
+       return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK                                0x00007f00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT                       8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
+{
+       return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET2                             0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK                         0x00003fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT                                0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
+{
+       return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK                         0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT                                16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
+{
+       return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
+}
+
 #define REG_A6XX_RB_MSAA_CNTL                                  0x000088d5
 #define A6XX_RB_MSAA_CNTL_SAMPLES__MASK                                0x00000018
 #define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT                       3
@@ -3758,6 +4312,12 @@ static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
 }
 
 #define REG_A6XX_RB_BLIT_BASE_GMEM                             0x000088d6
+#define A6XX_RB_BLIT_BASE_GMEM__MASK                           0xfffff000
+#define A6XX_RB_BLIT_BASE_GMEM__SHIFT                          12
+static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val)
+{
+       return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK;
+}
 
 #define REG_A6XX_RB_BLIT_DST_INFO                              0x000088d7
 #define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK                  0x00000003
@@ -3773,17 +4333,26 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
 {
        return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
 }
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK                 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT                        5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+       return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK               0x00007f80
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT              7
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
 }
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK                 0x00000060
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT                        5
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+#define A6XX_RB_BLIT_DST_INFO_UNK15                            0x00008000
+
+#define REG_A6XX_RB_BLIT_DST                                   0x000088d8
+#define A6XX_RB_BLIT_DST__MASK                                 0xffffffff
+#define A6XX_RB_BLIT_DST__SHIFT                                        0
+static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
 {
-       return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+       return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
 }
 
 #define REG_A6XX_RB_BLIT_DST_LO                                        0x000088d8
@@ -3791,7 +4360,7 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val
 #define REG_A6XX_RB_BLIT_DST_HI                                        0x000088d9
 
 #define REG_A6XX_RB_BLIT_DST_PITCH                             0x000088da
-#define A6XX_RB_BLIT_DST_PITCH__MASK                           0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__MASK                           0x0000ffff
 #define A6XX_RB_BLIT_DST_PITCH__SHIFT                          0
 static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
 {
@@ -3799,18 +4368,40 @@ static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
 }
 
 #define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH                       0x000088db
-#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK                     0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK                     0x1fffffff
 #define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT                    0
 static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
 {
        return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
 }
 
+#define REG_A6XX_RB_BLIT_FLAG_DST                              0x000088dc
+#define A6XX_RB_BLIT_FLAG_DST__MASK                            0xffffffff
+#define A6XX_RB_BLIT_FLAG_DST__SHIFT                           0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
+{
+       return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
+}
+
 #define REG_A6XX_RB_BLIT_FLAG_DST_LO                           0x000088dc
 
 #define REG_A6XX_RB_BLIT_FLAG_DST_HI                           0x000088dd
 
-#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0                       0x000088df
+#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH                                0x000088de
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK                        0x000007ff
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT               0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK          0x0ffff800
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT         11
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0                       0x000088df
 
 #define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1                       0x000088e0
 
@@ -3829,14 +4420,76 @@ static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
 {
        return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
 }
+#define A6XX_RB_BLIT_INFO_UNK8__MASK                           0x00000300
+#define A6XX_RB_BLIT_INFO_UNK8__SHIFT                          8
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK8(uint32_t val)
+{
+       return ((val) << A6XX_RB_BLIT_INFO_UNK8__SHIFT) & A6XX_RB_BLIT_INFO_UNK8__MASK;
+}
+#define A6XX_RB_BLIT_INFO_UNK12__MASK                          0x0000f000
+#define A6XX_RB_BLIT_INFO_UNK12__SHIFT                         12
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK12(uint32_t val)
+{
+       return ((val) << A6XX_RB_BLIT_INFO_UNK12__SHIFT) & A6XX_RB_BLIT_INFO_UNK12__MASK;
+}
 
 #define REG_A6XX_RB_UNKNOWN_88F0                               0x000088f0
 
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE                       0x000088f1
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK                     0xffffffff
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT                    0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH                      0x000088f3
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK              0x000007ff
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT             0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK                0x00fff800
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT       11
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F4                               0x000088f4
+
 #define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO                  0x00008900
 
 #define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI                  0x00008901
 
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE                     0x00008900
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK                   0xffffffff
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT                  0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK;
+}
+
 #define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH                    0x00008902
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK            0x0000007f
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT           0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK             0x00000700
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT            8
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
+{
+       return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK      0x0ffff800
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT     11
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
 
 static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
 
@@ -3844,39 +4497,93 @@ static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return
 
 static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
 
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK                     0xffffffff
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT                    0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val)
+{
+       return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK;
+}
+
 static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
 #define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK              0x000007ff
 #define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT             0
 static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
 {
-       return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+       return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
 }
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK                0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK                0x1ffff800
 #define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT       11
 static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
 {
-       return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+       return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
 }
 
 #define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO                       0x00008927
 
 #define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI                       0x00008928
 
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR                          0x00008927
+#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK                                0xffffffff
+#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT                       0
+static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val)
+{
+       return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK;
+}
+
 #define REG_A6XX_RB_2D_BLIT_CNTL                               0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK                      0x00000007
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT                     0
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_UNK3__MASK                                0x00000078
+#define A6XX_RB_2D_BLIT_CNTL_UNK3__SHIFT                       3
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK3(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK3__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK3__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR                       0x00000080
 #define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK                        0x0000ff00
 #define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT               8
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
 }
 #define A6XX_RB_2D_BLIT_CNTL_SCISSOR                           0x00010000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK                       0x00060000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT                      17
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_D24S8                             0x00080000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK                                0x00f00000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT                       20
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK                                0x1f000000
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT                       24
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_UNK29__MASK                       0x20000000
+#define A6XX_RB_2D_BLIT_CNTL_UNK29__SHIFT                      29
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK29(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK29__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK29__MASK;
+}
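
[Illustrative aside, not part of the patch: the single-bit flags and packed fields of RB_2D_BLIT_CNTL OR together into one control word. A sketch under that assumption, using only fields that take plain integers here; the COLOR_FORMAT, ROTATE and IFMT enum arguments are defined elsewhere in the header and are omitted.]

#include <assert.h>
#include <stdint.h>

/* Copied from the hunk above for a standalone sketch. */
#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR  0x00000080
#define A6XX_RB_2D_BLIT_CNTL_SCISSOR      0x00010000
#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK   0x00f00000
#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT  20

static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
{
	return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
}

int main(void)
{
	/* hypothetical setup: solid-color fill writing all four channels (mask 0xf) */
	uint32_t cntl = A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR |
			A6XX_RB_2D_BLIT_CNTL_MASK(0xf);
	assert(cntl == 0x00f00080);
	return 0;
}
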
 
-#define REG_A6XX_RB_UNKNOWN_8C01                               0x00008c01
+#define REG_A6XX_RB_2D_UNKNOWN_8C01                            0x00008c01
 
 #define REG_A6XX_RB_2D_DST_INFO                                        0x00008c17
 #define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK                 0x000000ff
 #define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT                        0
-static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
 }
@@ -3893,23 +4600,98 @@ static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
        return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
 }
 #define A6XX_RB_2D_DST_INFO_FLAGS                              0x00001000
+#define A6XX_RB_2D_DST_INFO_SRGB                               0x00002000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK                      0x0000c000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT                     14
+static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FILTER                             0x00010000
+#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE                    0x00040000
+#define A6XX_RB_2D_DST_INFO_UNK20                              0x00100000
+#define A6XX_RB_2D_DST_INFO_UNK22                              0x00400000
 
 #define REG_A6XX_RB_2D_DST_LO                                  0x00008c18
 
 #define REG_A6XX_RB_2D_DST_HI                                  0x00008c19
 
-#define REG_A6XX_RB_2D_DST_SIZE                                        0x00008c1a
-#define A6XX_RB_2D_DST_SIZE_PITCH__MASK                                0x0000ffff
-#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT                       0
-static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+#define REG_A6XX_RB_2D_DST                                     0x00008c18
+#define A6XX_RB_2D_DST__MASK                                   0xffffffff
+#define A6XX_RB_2D_DST__SHIFT                                  0
+static inline uint32_t A6XX_RB_2D_DST(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PITCH                               0x00008c1a
+#define A6XX_RB_2D_DST_PITCH__MASK                             0x0000ffff
+#define A6XX_RB_2D_DST_PITCH__SHIFT                            0
+static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE1                              0x00008c1b
+#define A6XX_RB_2D_DST_PLANE1__MASK                            0xffffffff
+#define A6XX_RB_2D_DST_PLANE1__SHIFT                           0
+static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val)
 {
-       return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+       return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE_PITCH                         0x00008c1d
+#define A6XX_RB_2D_DST_PLANE_PITCH__MASK                       0x0000ffff
+#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT                      0
+static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE2                              0x00008c1e
+#define A6XX_RB_2D_DST_PLANE2__MASK                            0xffffffff
+#define A6XX_RB_2D_DST_PLANE2__SHIFT                           0
+static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
 }
 
 #define REG_A6XX_RB_2D_DST_FLAGS_LO                            0x00008c20
 
 #define REG_A6XX_RB_2D_DST_FLAGS_HI                            0x00008c21
 
+#define REG_A6XX_RB_2D_DST_FLAGS                               0x00008c20
+#define A6XX_RB_2D_DST_FLAGS__MASK                             0xffffffff
+#define A6XX_RB_2D_DST_FLAGS__SHIFT                            0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PITCH                         0x00008c22
+#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK                       0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT                      0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE                         0x00008c23
+#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK                       0xffffffff
+#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT                      0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val)
+{
+       return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH                   0x00008c25
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK                 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT                        0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
+}
+
 #define REG_A6XX_RB_2D_SRC_SOLID_C0                            0x00008c2c
 
 #define REG_A6XX_RB_2D_SRC_SOLID_C1                            0x00008c2d
@@ -3922,15 +4704,205 @@ static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
 
 #define REG_A6XX_RB_UNKNOWN_8E04                               0x00008e04
 
+#define REG_A6XX_RB_ADDR_MODE_CNTL                             0x00008e05
+
 #define REG_A6XX_RB_CCU_CNTL                                   0x00008e07
+#define A6XX_RB_CCU_CNTL_OFFSET__MASK                          0xff800000
+#define A6XX_RB_CCU_CNTL_OFFSET__SHIFT                         23
+static inline uint32_t A6XX_RB_CCU_CNTL_OFFSET(uint32_t val)
+{
+       return ((val >> 12) << A6XX_RB_CCU_CNTL_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_GMEM                                  0x00400000
+#define A6XX_RB_CCU_CNTL_UNK2                                  0x00000004
+
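[Illustrative aside, not part of the patch: a sketch of how the new RB_CCU_CNTL fields would compose. The >> 12 pre-scale in the OFFSET helper suggests the cache offset is programmed in 4 KiB granules; that reading, and the 64 KiB example offset, are assumptions.]

#include <assert.h>
#include <stdint.h>

/* Copied from the hunk above for a standalone sketch. */
#define A6XX_RB_CCU_CNTL_OFFSET__MASK   0xff800000
#define A6XX_RB_CCU_CNTL_OFFSET__SHIFT  23
#define A6XX_RB_CCU_CNTL_GMEM           0x00400000

static inline uint32_t A6XX_RB_CCU_CNTL_OFFSET(uint32_t val)
{
	/* byte offset pre-scaled by >> 12, i.e. apparently 4 KiB granularity */
	return ((val >> 12) << A6XX_RB_CCU_CNTL_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_OFFSET__MASK;
}

int main(void)
{
	/* hypothetical: cache backed by GMEM at a 64 KiB offset */
	uint32_t cntl = A6XX_RB_CCU_CNTL_GMEM | A6XX_RB_CCU_CNTL_OFFSET(0x10000);
	assert(cntl == (0x00400000u | (0x10u << 23)));
	return 0;
}
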
+#define REG_A6XX_RB_NC_MODE_CNTL                               0x00008e08
+#define A6XX_RB_NC_MODE_CNTL_MODE                              0x00000001
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK                   0x00000006
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT                  1
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+       return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH                 0x00000008
+#define A6XX_RB_NC_MODE_CNTL_AMSBC                             0x00000010
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK                   0x00000400
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT                  10
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+       return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR                 0x00000800
+#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK                       0x00003000
+#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT                      12
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
+{
+       return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
+}
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0                           0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1                           0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2                           0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3                           0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4                           0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5                           0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6                           0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7                           0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0                          0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1                          0x00008e19
 
-#define REG_A6XX_VPC_UNKNOWN_9101                              0x00009101
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2                          0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3                          0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4                          0x00008e1c
+
+#define REG_A6XX_RB_UNKNOWN_8E28                               0x00008e28
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0                          0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1                          0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2                          0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3                          0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST                 0x00008e3b
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD                   0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE           0x00008e50
+
+#define REG_A6XX_RB_UNKNOWN_8E51                               0x00008e51
+#define A6XX_RB_UNKNOWN_8E51__MASK                             0xffffffff
+#define A6XX_RB_UNKNOWN_8E51__SHIFT                            0
+static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val)
+{
+       return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK;
+}
+
+#define REG_A6XX_VPC_UNKNOWN_9100                              0x00009100
+
+#define REG_A6XX_VPC_VS_CLIP_CNTL                              0x00009101
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK                  0x000000ff
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT                 0
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK           0x0000ff00
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT          8
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK           0x00ff0000
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT          16
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
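[Illustrative aside, not part of the patch: the per-stage VPC_*_CLIP_CNTL registers added here all carry the same three fields (clip mask plus two clip-distance locations). A sketch composing the VS variant; the clip-plane count and the location value are made-up example inputs.]

#include <assert.h>
#include <stdint.h>

/* Copied from the hunk above for a standalone sketch. */
#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK          0x000000ff
#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT         0
#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK   0x0000ff00
#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT  8

static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
{
	return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) &
	       A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
}

static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
{
	return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) &
	       A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
}

int main(void)
{
	/* hypothetical: four clip planes enabled, distances 0-3 at location 8 */
	uint32_t cntl = A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(0xf) |
			A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(8);
	assert(cntl == 0x080f);
	return 0;
}
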
+#define REG_A6XX_VPC_GS_CLIP_CNTL                              0x00009102
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK                  0x000000ff
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT                 0
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK           0x0000ff00
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT          8
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK           0x00ff0000
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT          16
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_CLIP_CNTL                              0x00009103
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK                  0x000000ff
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT                 0
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK           0x0000ff00
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT          8
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK           0x00ff0000
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT          16
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_LAYER_CNTL                             0x00009104
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK                  0x000000ff
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT                 0
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK                   0x0000ff00
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT                  8
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_LAYER_CNTL                             0x00009105
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK                  0x000000ff
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT                 0
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK                   0x0000ff00
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT                  8
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
+}
 
-#define REG_A6XX_VPC_GS_SIV_CNTL                               0x00009104
+#define REG_A6XX_VPC_DS_LAYER_CNTL                             0x00009106
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK                  0x000000ff
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT                 0
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK                   0x0000ff00
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT                  8
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
+}
 
 #define REG_A6XX_VPC_UNKNOWN_9107                              0x00009107
 
-#define REG_A6XX_VPC_UNKNOWN_9108                              0x00009108
+#define REG_A6XX_VPC_POLYGON_MODE                              0x00009108
+#define A6XX_VPC_POLYGON_MODE_MODE__MASK                       0x00000003
+#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT                      0
+static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+       return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
+}
 
 static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
 
@@ -3949,6 +4921,12 @@ static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i
 static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
 
 #define REG_A6XX_VPC_SO_CNTL                                   0x00009216
+#define A6XX_VPC_SO_CNTL_UNK0__MASK                            0x000000ff
+#define A6XX_VPC_SO_CNTL_UNK0__SHIFT                           0
+static inline uint32_t A6XX_VPC_SO_CNTL_UNK0(uint32_t val)
+{
+       return ((val) << A6XX_VPC_SO_CNTL_UNK0__SHIFT) & A6XX_VPC_SO_CNTL_UNK0__MASK;
+}
 #define A6XX_VPC_SO_CNTL_ENABLE                                        0x00010000
 
 #define REG_A6XX_VPC_SO_PROG                                   0x00009217
@@ -3979,44 +4957,143 @@ static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
 }
 #define A6XX_VPC_SO_PROG_B_EN                                  0x00800000
 
+#define REG_A6XX_VPC_SO_STREAM_COUNTS_LO                       0x00009218
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS_HI                       0x00009219
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS                          0x00009218
+#define A6XX_VPC_SO_STREAM_COUNTS__MASK                                0xffffffff
+#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT                       0
+static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val)
+{
+       return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK;
+}
+
 static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
 
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_BASE__MASK                          0xffffffff
+#define A6XX_VPC_SO_BUFFER_BASE__SHIFT                         0
+static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
+{
+       return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
+}
+
 static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
 
 static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
 
 static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_SIZE__MASK                          0xfffffffc
+#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT                         2
+static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val)
+{
+       return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK;
+}
 
 static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
 
 static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_OFFSET__MASK                                0xfffffffc
+#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT                       2
+static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val)
+{
+       return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+#define A6XX_VPC_SO_FLUSH_BASE__MASK                           0xffffffff
+#define A6XX_VPC_SO_FLUSH_BASE__SHIFT                          0
+static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
+{
+       return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
+}
 
 static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
 
 static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
 
-#define REG_A6XX_VPC_UNKNOWN_9236                              0x00009236
+#define REG_A6XX_VPC_POINT_COORD_INVERT                                0x00009236
+#define A6XX_VPC_POINT_COORD_INVERT_INVERT                     0x00000001
 
 #define REG_A6XX_VPC_UNKNOWN_9300                              0x00009300
 
-#define REG_A6XX_VPC_PACK                                      0x00009301
-#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK                      0x000000ff
-#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT                     0
-static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+#define REG_A6XX_VPC_VS_PACK                                   0x00009301
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK                   0x000000ff
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT                  0
+static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK                     0x0000ff00
+#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT                    8
+static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_PSIZELOC__MASK                                0x00ff0000
+#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT                       16
+static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_UNK24__MASK                           0x0f000000
+#define A6XX_VPC_VS_PACK_UNK24__SHIFT                          24
+static inline uint32_t A6XX_VPC_VS_PACK_UNK24(uint32_t val)
+{
+       return ((val) << A6XX_VPC_VS_PACK_UNK24__SHIFT) & A6XX_VPC_VS_PACK_UNK24__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PACK                                   0x00009302
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK                   0x000000ff
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT                  0
+static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK                     0x0000ff00
+#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT                    8
+static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_PSIZELOC__MASK                                0x00ff0000
+#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT                       16
+static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_UNK24__MASK                           0x0f000000
+#define A6XX_VPC_GS_PACK_UNK24__SHIFT                          24
+static inline uint32_t A6XX_VPC_GS_PACK_UNK24(uint32_t val)
+{
+       return ((val) << A6XX_VPC_GS_PACK_UNK24__SHIFT) & A6XX_VPC_GS_PACK_UNK24__MASK;
+}
+
+#define REG_A6XX_VPC_DS_PACK                                   0x00009303
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK                   0x000000ff
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT                  0
+static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val)
 {
-       return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+       return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK;
 }
-#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK                       0x0000ff00
-#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT                      8
-static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK                     0x0000ff00
+#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT                    8
+static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val)
 {
-       return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+       return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK;
 }
-#define A6XX_VPC_PACK_PSIZELOC__MASK                           0x00ff0000
-#define A6XX_VPC_PACK_PSIZELOC__SHIFT                          16
-static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+#define A6XX_VPC_DS_PACK_PSIZELOC__MASK                                0x00ff0000
+#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT                       16
+static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
 {
-       return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+       return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_UNK24__MASK                           0x0f000000
+#define A6XX_VPC_DS_PACK_UNK24__SHIFT                          24
+static inline uint32_t A6XX_VPC_DS_PACK_UNK24(uint32_t val)
+{
+       return ((val) << A6XX_VPC_DS_PACK_UNK24__SHIFT) & A6XX_VPC_DS_PACK_UNK24__MASK;
 }
 
 #define REG_A6XX_VPC_CNTL_0                                    0x00009304
@@ -4026,7 +5103,19 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
 {
        return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
 }
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK                                0x0000ff00
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT                       8
+static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
+}
 #define A6XX_VPC_CNTL_0_VARYING                                        0x00010000
+#define A6XX_VPC_CNTL_0_UNKLOC__MASK                           0xff000000
+#define A6XX_VPC_CNTL_0_UNKLOC__SHIFT                          24
+static inline uint32_t A6XX_VPC_CNTL_0_UNKLOC(uint32_t val)
+{
+       return ((val) << A6XX_VPC_CNTL_0_UNKLOC__SHIFT) & A6XX_VPC_CNTL_0_UNKLOC__MASK;
+}
 
 #define REG_A6XX_VPC_SO_BUF_CNTL                               0x00009305
 #define A6XX_VPC_SO_BUF_CNTL_BUF0                              0x00000001
@@ -4034,15 +5123,65 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
 #define A6XX_VPC_SO_BUF_CNTL_BUF2                              0x00000040
 #define A6XX_VPC_SO_BUF_CNTL_BUF3                              0x00000200
 #define A6XX_VPC_SO_BUF_CNTL_ENABLE                            0x00008000
+#define A6XX_VPC_SO_BUF_CNTL_UNK16__MASK                       0x000f0000
+#define A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT                      16
+static inline uint32_t A6XX_VPC_SO_BUF_CNTL_UNK16(uint32_t val)
+{
+       return ((val) << A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT) & A6XX_VPC_SO_BUF_CNTL_UNK16__MASK;
+}
 
-#define REG_A6XX_VPC_SO_OVERRIDE                               0x00009306
-#define A6XX_VPC_SO_OVERRIDE_SO_DISABLE                                0x00000001
+#define REG_A6XX_VPC_SO_DISABLE                                        0x00009306
+#define A6XX_VPC_SO_DISABLE_DISABLE                            0x00000001
 
 #define REG_A6XX_VPC_UNKNOWN_9600                              0x00009600
 
+#define REG_A6XX_VPC_ADDR_MODE_CNTL                            0x00009601
+
 #define REG_A6XX_VPC_UNKNOWN_9602                              0x00009602
 
+#define REG_A6XX_VPC_UNKNOWN_9603                              0x00009603
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0                         0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1                         0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2                         0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3                         0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4                         0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5                         0x00009609
+
+#define REG_A6XX_PC_TESS_NUM_VERTEX                            0x00009800
+
 #define REG_A6XX_PC_UNKNOWN_9801                               0x00009801
+#define A6XX_PC_UNKNOWN_9801_UNK0__MASK                                0x000007ff
+#define A6XX_PC_UNKNOWN_9801_UNK0__SHIFT                       0
+static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK0(uint32_t val)
+{
+       return ((val) << A6XX_PC_UNKNOWN_9801_UNK0__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK0__MASK;
+}
+#define A6XX_PC_UNKNOWN_9801_UNK13__MASK                       0x00002000
+#define A6XX_PC_UNKNOWN_9801_UNK13__SHIFT                      13
+static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK13(uint32_t val)
+{
+       return ((val) << A6XX_PC_UNKNOWN_9801_UNK13__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK13__MASK;
+}
+
+#define REG_A6XX_PC_TESS_CNTL                                  0x00009802
+#define A6XX_PC_TESS_CNTL_SPACING__MASK                                0x00000003
+#define A6XX_PC_TESS_CNTL_SPACING__SHIFT                       0
+static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val)
+{
+       return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK;
+}
+#define A6XX_PC_TESS_CNTL_OUTPUT__MASK                         0x0000000c
+#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT                                2
+static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
+{
+       return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK;
+}
 
 #define REG_A6XX_PC_RESTART_INDEX                              0x00009803
 
@@ -4050,43 +5189,244 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
 
 #define REG_A6XX_PC_UNKNOWN_9805                               0x00009805
 
-#define REG_A6XX_PC_UNKNOWN_9806                               0x00009806
+#define REG_A6XX_PC_PRIMID_PASSTHRU                            0x00009806
 
-#define REG_A6XX_PC_UNKNOWN_9980                               0x00009980
+#define REG_A6XX_PC_DRAW_CMD                                   0x00009840
+#define A6XX_PC_DRAW_CMD_STATE_ID__MASK                                0x000000ff
+#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT                       0
+static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK;
+}
 
-#define REG_A6XX_PC_UNKNOWN_9981                               0x00009981
+#define REG_A6XX_PC_DISPATCH_CMD                               0x00009841
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK                    0x000000ff
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT                   0
+static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK;
+}
 
-#define REG_A6XX_PC_UNKNOWN_9990                               0x00009990
+#define REG_A6XX_PC_EVENT_CMD                                  0x00009842
+#define A6XX_PC_EVENT_CMD_STATE_ID__MASK                       0x00ff0000
+#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT                      16
+static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_PC_EVENT_CMD_EVENT__MASK                          0x0000007f
+#define A6XX_PC_EVENT_CMD_EVENT__SHIFT                         0
+static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+       return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_PC_POLYGON_MODE                               0x00009981
+#define A6XX_PC_POLYGON_MODE_MODE__MASK                                0x00000003
+#define A6XX_PC_POLYGON_MODE_MODE__SHIFT                       0
+static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+       return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9980                               0x00009980
 
 #define REG_A6XX_PC_PRIMITIVE_CNTL_0                           0x00009b00
 #define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART             0x00000001
 #define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST            0x00000002
+#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004
+#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3                          0x00000008
+
+#define REG_A6XX_PC_VS_OUT_CNTL                                        0x00009b01
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK                        0x000000ff
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT               0
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_VS_OUT_CNTL_PSIZE                              0x00000100
+#define A6XX_PC_VS_OUT_CNTL_LAYER                              0x00000200
+#define A6XX_PC_VS_OUT_CNTL_VIEW                               0x00000400
+#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID                       0x00000800
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK                    0x00ff0000
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT                   16
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_GS_OUT_CNTL                                        0x00009b02
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK                        0x000000ff
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT               0
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_GS_OUT_CNTL_PSIZE                              0x00000100
+#define A6XX_PC_GS_OUT_CNTL_LAYER                              0x00000200
+#define A6XX_PC_GS_OUT_CNTL_VIEW                               0x00000400
+#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID                       0x00000800
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK                    0x00ff0000
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT                   16
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_3                           0x00009b03
+
+#define REG_A6XX_PC_DS_OUT_CNTL                                        0x00009b04
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK                        0x000000ff
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT               0
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_DS_OUT_CNTL_PSIZE                              0x00000100
+#define A6XX_PC_DS_OUT_CNTL_LAYER                              0x00000200
+#define A6XX_PC_DS_OUT_CNTL_VIEW                               0x00000400
+#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID                       0x00000800
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK                    0x00ff0000
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT                   16
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+       return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
+}
 
-#define REG_A6XX_PC_PRIMITIVE_CNTL_1                           0x00009b01
-#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK           0x0000007f
-#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT          0
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+#define REG_A6XX_PC_PRIMITIVE_CNTL_5                           0x00009b05
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK         0x000000ff
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT                0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
+{
+       return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK          0x00007c00
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT         10
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
 {
-       return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+       return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK               0x00030000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT              16
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
+{
+       return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK                   0x00040000
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT                  18
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val)
+{
+       return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK;
 }
-#define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE                         0x00000100
 
-#define REG_A6XX_PC_UNKNOWN_9B06                               0x00009b06
+#define REG_A6XX_PC_PRIMITIVE_CNTL_6                           0x00009b06
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK           0x000007ff
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT          0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
+}
 
 #define REG_A6XX_PC_UNKNOWN_9B07                               0x00009b07
 
+#define REG_A6XX_PC_UNKNOWN_9B08                               0x00009b08
+
+#define REG_A6XX_PC_2D_EVENT_CMD                               0x00009c00
+#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK                       0x0000007f
+#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT                      0
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+       return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK;
+}
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK                    0x0000ff00
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT                   8
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DBG_ECO_CNTL                               0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL                             0x00009e01
+
 #define REG_A6XX_PC_TESSFACTOR_ADDR_LO                         0x00009e08
 
 #define REG_A6XX_PC_TESSFACTOR_ADDR_HI                         0x00009e09
 
+#define REG_A6XX_PC_TESSFACTOR_ADDR                            0x00009e08
+#define A6XX_PC_TESSFACTOR_ADDR__MASK                          0xffffffff
+#define A6XX_PC_TESSFACTOR_ADDR__SHIFT                         0
+static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
+{
+       return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_PC_VSTREAM_CONTROL                            0x00009e11
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK                     0x0000ffff
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT                    0
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val)
+{
+       return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK                 0x003f0000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT                        16
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
+{
+       return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK                    0x07c00000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT                   22
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
+{
+       return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
+}
+
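[Illustrative aside, not part of the patch: a sketch composing PC_VSTREAM_CONTROL from its new field helpers. The size and index inputs are hypothetical example values; the patch itself does not document what VSC_SIZE and VSC_N mean.]

#include <assert.h>
#include <stdint.h>

/* Copied from the hunk above for a standalone sketch. */
#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK   0x003f0000
#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT  16
#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK      0x07c00000
#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT     22

static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
{
	return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) &
	       A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
}

static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
{
	return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) &
	       A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
}

int main(void)
{
	/* hypothetical values: size 32 (6-bit field), index 3 (5-bit field) */
	uint32_t ctrl = A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(32) |
			A6XX_PC_VSTREAM_CONTROL_VSC_N(3);
	assert(ctrl == ((32u << 16) | (3u << 22)));
	return 0;
}
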
+#define REG_A6XX_PC_BIN_PRIM_STRM                              0x00009e12
+#define A6XX_PC_BIN_PRIM_STRM__MASK                            0xffffffff
+#define A6XX_PC_BIN_PRIM_STRM__SHIFT                           0
+static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val)
+{
+       return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK;
+}
+
+#define REG_A6XX_PC_BIN_DRAW_STRM                              0x00009e14
+#define A6XX_PC_BIN_DRAW_STRM__MASK                            0xffffffff
+#define A6XX_PC_BIN_DRAW_STRM__SHIFT                           0
+static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
+{
+       return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
+}
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0                           0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1                           0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2                           0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3                           0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4                           0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5                           0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6                           0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7                           0x00009e3b
+
 #define REG_A6XX_PC_UNKNOWN_9E72                               0x00009e72
 
 #define REG_A6XX_VFD_CONTROL_0                                 0x0000a000
-#define A6XX_VFD_CONTROL_0_VTXCNT__MASK                                0x0000003f
-#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT                       0
-static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK                     0x0000003f
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT                    0
+static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val)
 {
-       return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+       return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK;
+}
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK                    0x00003f00
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT                   8
+static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val)
+{
+       return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK;
 }
 
 #define REG_A6XX_VFD_CONTROL_1                                 0x0000a001
@@ -4110,19 +5450,25 @@ static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
 }
 
 #define REG_A6XX_VFD_CONTROL_2                                 0x0000a002
-#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK                 0x000000ff
-#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT                        0
-static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+#define A6XX_VFD_CONTROL_2_REGID_HSPATCHID__MASK               0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_HSPATCHID__SHIFT              0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSPATCHID(uint32_t val)
+{
+       return ((val) << A6XX_VFD_CONTROL_2_REGID_HSPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK            0x0000ff00
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT           8
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
 {
-       return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+       return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK;
 }
 
 #define REG_A6XX_VFD_CONTROL_3                                 0x0000a003
-#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK                 0x0000ff00
-#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT                        8
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__MASK               0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__SHIFT              8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPATCHID(uint32_t val)
 {
-       return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+       return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPATCHID__MASK;
 }
 #define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK                   0x00ff0000
 #define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT                  16
@@ -4140,15 +5486,24 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
 #define REG_A6XX_VFD_CONTROL_4                                 0x0000a004
 
 #define REG_A6XX_VFD_CONTROL_5                                 0x0000a005
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK                        0x000000ff
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT               0
+static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
+{
+       return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
+}
 
 #define REG_A6XX_VFD_CONTROL_6                                 0x0000a006
+#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU                     0x00000001
 
 #define REG_A6XX_VFD_MODE_CNTL                                 0x0000a007
 #define A6XX_VFD_MODE_CNTL_BINNING_PASS                                0x00000001
 
 #define REG_A6XX_VFD_UNKNOWN_A008                              0x0000a008
 
-#define REG_A6XX_VFD_UNKNOWN_A009                              0x0000a009
+#define REG_A6XX_VFD_ADD_OFFSET                                        0x0000a009
+#define A6XX_VFD_ADD_OFFSET_VERTEX                             0x00000001
+#define A6XX_VFD_ADD_OFFSET_INSTANCE                           0x00000002
 
 #define REG_A6XX_VFD_INDEX_OFFSET                              0x0000a00e
 
@@ -4156,6 +5511,8 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
 
 static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
 
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
 static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
 
 static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
@@ -4173,10 +5530,16 @@ static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
 {
        return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
 }
+#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK                     0x0001ffe0
+#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT                    5
+static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val)
+{
+       return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK;
+}
 #define A6XX_VFD_DECODE_INSTR_INSTANCED                                0x00020000
 #define A6XX_VFD_DECODE_INSTR_FORMAT__MASK                     0x0ff00000
 #define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT                    20
-static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
 }
@@ -4209,12 +5572,44 @@ static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
 
 #define REG_A6XX_SP_UNKNOWN_A0F8                               0x0000a0f8
 
-#define REG_A6XX_SP_PRIMITIVE_CNTL                             0x0000a802
-#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK                     0x0000001f
-#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT                    0
-static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+#define REG_A6XX_SP_VS_CTRL_REG0                               0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK                 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT                        14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+       return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT                 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_VS_CTRL_REG0_DIFF_FINE                         0x00800000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE                      0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS                                0x80000000
+
+#define REG_A6XX_SP_VS_BRANCH_COND                             0x0000a801
+
+#define REG_A6XX_SP_VS_PRIMITIVE_CNTL                          0x0000a802
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK                    0x0000003f
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT                   0
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
 {
-       return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+       return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
 }
 
 static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
@@ -4273,35 +5668,6 @@ static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
        return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
 }
 
-#define REG_A6XX_SP_VS_CTRL_REG0                               0x0000a800
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x0000007e
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           1
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
-       return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x00001f80
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           7
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
-       return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK                 0x000fc000
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT                        14
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
-       return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT                 20
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
-       return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_VARYING                           0x00400000
-#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE                      0x04000000
-#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS                                0x80000000
-
 #define REG_A6XX_SP_UNKNOWN_A81B                               0x0000a81b
 
 #define REG_A6XX_SP_VS_OBJ_START_LO                            0x0000a81c
@@ -4311,6 +5677,10 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_VS_TEX_COUNT                               0x0000a822
 
 #define REG_A6XX_SP_VS_CONFIG                                  0x0000a823
+#define A6XX_SP_VS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_VS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_VS_CONFIG_BINDLESS_UBO                         0x00000008
 #define A6XX_SP_VS_CONFIG_ENABLED                              0x00000100
 #define A6XX_SP_VS_CONFIG_NTEX__MASK                           0x0001fe00
 #define A6XX_SP_VS_CONFIG_NTEX__SHIFT                          9
@@ -4318,12 +5688,18 @@ static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
 {
        return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
 }
-#define A6XX_SP_VS_CONFIG_NSAMP__MASK                          0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK                          0x003e0000
 #define A6XX_SP_VS_CONFIG_NSAMP__SHIFT                         17
 static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
 {
        return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
 }
+#define A6XX_SP_VS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
+}
 
 #define REG_A6XX_SP_VS_INSTRLEN                                        0x0000a824
 
@@ -4353,11 +5729,14 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
        return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A6XX_SP_HS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_HS_CTRL_REG0_DIFF_FINE                         0x00800000
 #define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE                      0x04000000
 #define A6XX_SP_HS_CTRL_REG0_MERGEDREGS                                0x80000000
 
 #define REG_A6XX_SP_HS_UNKNOWN_A831                            0x0000a831
 
+#define REG_A6XX_SP_HS_UNKNOWN_A833                            0x0000a833
+
 #define REG_A6XX_SP_HS_OBJ_START_LO                            0x0000a834
 
 #define REG_A6XX_SP_HS_OBJ_START_HI                            0x0000a835
@@ -4365,6 +5744,10 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_HS_TEX_COUNT                               0x0000a83a
 
 #define REG_A6XX_SP_HS_CONFIG                                  0x0000a83b
+#define A6XX_SP_HS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_HS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_HS_CONFIG_BINDLESS_UBO                         0x00000008
 #define A6XX_SP_HS_CONFIG_ENABLED                              0x00000100
 #define A6XX_SP_HS_CONFIG_NTEX__MASK                           0x0001fe00
 #define A6XX_SP_HS_CONFIG_NTEX__SHIFT                          9
@@ -4372,12 +5755,18 @@ static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
 {
        return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
 }
-#define A6XX_SP_HS_CONFIG_NSAMP__MASK                          0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK                          0x003e0000
 #define A6XX_SP_HS_CONFIG_NSAMP__SHIFT                         17
 static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
 {
        return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
 }
+#define A6XX_SP_HS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
+}
 
 #define REG_A6XX_SP_HS_INSTRLEN                                        0x0000a83c
 
@@ -4398,17 +5787,84 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 #define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT                        14
 static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
 {
-       return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+       return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT                 20
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+       return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_DS_CTRL_REG0_DIFF_FINE                         0x00800000
+#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE                      0x04000000
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS                                0x80000000
+
+#define REG_A6XX_SP_DS_PRIMITIVE_CNTL                          0x0000a842
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK                    0x0000003f
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT                   0
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define A6XX_SP_DS_OUT_REG_A_REGID__MASK                       0x000000ff
+#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT                      0
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK                    0x00000f00
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT                   8
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_REGID__MASK                       0x00ff0000
+#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT                      16
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK                    0x0f000000
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT                   24
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
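Array registers such as SP_DS_OUT use a stride helper (REG_A6XX_SP_DS_OUT_REG(i0)) to compute the i-th register offset, and the per-slot value is packed from the A/B regid and compmask fields defined above. A small sketch with made-up register IDs:

	uint32_t reg  = REG_A6XX_SP_DS_OUT_REG(i);            /* offset of the i-th output slot */
	uint32_t word = A6XX_SP_DS_OUT_REG_A_REGID(regid_a) | /* regid_a/regid_b are made up */
			A6XX_SP_DS_OUT_REG_A_COMPMASK(0xf) |
			A6XX_SP_DS_OUT_REG_B_REGID(regid_b) |
			A6XX_SP_DS_OUT_REG_B_COMPMASK(0xf);
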
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK                   0x000000ff
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT                  0
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK                   0x0000ff00
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT                  8
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK                   0x00ff0000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT                  16
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
 }
-#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK                  0x00100000
-#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT                 20
-static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK                   0xff000000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT                  24
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
 {
-       return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+       return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
 }
-#define A6XX_SP_DS_CTRL_REG0_VARYING                           0x00400000
-#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE                      0x04000000
-#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS                                0x80000000
+
+#define REG_A6XX_SP_DS_UNKNOWN_A85B                            0x0000a85b
 
 #define REG_A6XX_SP_DS_OBJ_START_LO                            0x0000a85c
 
@@ -4417,6 +5873,10 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_DS_TEX_COUNT                               0x0000a862
 
 #define REG_A6XX_SP_DS_CONFIG                                  0x0000a863
+#define A6XX_SP_DS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_DS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_DS_CONFIG_BINDLESS_UBO                         0x00000008
 #define A6XX_SP_DS_CONFIG_ENABLED                              0x00000100
 #define A6XX_SP_DS_CONFIG_NTEX__MASK                           0x0001fe00
 #define A6XX_SP_DS_CONFIG_NTEX__SHIFT                          9
@@ -4424,12 +5884,18 @@ static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
 {
        return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
 }
-#define A6XX_SP_DS_CONFIG_NSAMP__MASK                          0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK                          0x003e0000
 #define A6XX_SP_DS_CONFIG_NSAMP__SHIFT                         17
 static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
 {
        return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
 }
+#define A6XX_SP_DS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
+}
 
 #define REG_A6XX_SP_DS_INSTRLEN                                        0x0000a864
 
@@ -4459,10 +5925,83 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
        return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A6XX_SP_GS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_GS_CTRL_REG0_DIFF_FINE                         0x00800000
 #define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE                      0x04000000
 #define A6XX_SP_GS_CTRL_REG0_MERGEDREGS                                0x80000000
 
-#define REG_A6XX_SP_GS_UNKNOWN_A871                            0x0000a871
+#define REG_A6XX_SP_GS_PRIM_SIZE                               0x0000a871
+
+#define REG_A6XX_SP_GS_BRANCH_COND                             0x0000a872
+
+#define REG_A6XX_SP_GS_PRIMITIVE_CNTL                          0x0000a873
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK                    0x0000003f
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT                   0
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK            0x00003fc0
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT           6
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define A6XX_SP_GS_OUT_REG_A_REGID__MASK                       0x000000ff
+#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT                      0
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK                    0x00000f00
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT                   8
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_REGID__MASK                       0x00ff0000
+#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT                      16
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK                    0x0f000000
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT                   24
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK                   0x000000ff
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT                  0
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK                   0x0000ff00
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT                  8
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK                   0x00ff0000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT                  16
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK                   0xff000000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT                  24
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
 
 #define REG_A6XX_SP_GS_OBJ_START_LO                            0x0000a88d
 
@@ -4471,6 +6010,10 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_GS_TEX_COUNT                               0x0000a893
 
 #define REG_A6XX_SP_GS_CONFIG                                  0x0000a894
+#define A6XX_SP_GS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_GS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_GS_CONFIG_BINDLESS_UBO                         0x00000008
 #define A6XX_SP_GS_CONFIG_ENABLED                              0x00000100
 #define A6XX_SP_GS_CONFIG_NTEX__MASK                           0x0001fe00
 #define A6XX_SP_GS_CONFIG_NTEX__SHIFT                          9
@@ -4478,12 +6021,18 @@ static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
 {
        return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
 }
-#define A6XX_SP_GS_CONFIG_NSAMP__MASK                          0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK                          0x003e0000
 #define A6XX_SP_GS_CONFIG_NSAMP__SHIFT                         17
 static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
 {
        return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
 }
+#define A6XX_SP_GS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
+}
 
 #define REG_A6XX_SP_GS_INSTRLEN                                        0x0000a895
 
@@ -4545,9 +6094,12 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
        return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A6XX_SP_FS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE                         0x00800000
 #define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE                      0x04000000
 #define A6XX_SP_FS_CTRL_REG0_MERGEDREGS                                0x80000000
 
+#define REG_A6XX_SP_FS_BRANCH_COND                             0x0000a981
+
 #define REG_A6XX_SP_UNKNOWN_A982                               0x0000a982
 
 #define REG_A6XX_SP_FS_OBJ_START_LO                            0x0000a983
@@ -4557,6 +6109,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_BLEND_CNTL                                 0x0000a989
 #define A6XX_SP_BLEND_CNTL_ENABLED                             0x00000001
 #define A6XX_SP_BLEND_CNTL_UNK8                                        0x00000100
+#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE                        0x00000200
 #define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE                   0x00000400
 
 #define REG_A6XX_SP_SRGB_CNTL                                  0x0000a98a
@@ -4620,12 +6173,25 @@ static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
 }
 
 #define REG_A6XX_SP_FS_OUTPUT_CNTL0                            0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE           0x00000001
 #define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK              0x0000ff00
 #define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT             8
 static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
 {
        return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
 }
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK           0x00ff0000
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT          16
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK         0xff000000
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT                24
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
+}
 
 #define REG_A6XX_SP_FS_OUTPUT_CNTL1                            0x0000a98d
 #define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK                      0x0000000f
@@ -4640,19 +6206,101 @@ static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1
 static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
 #define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK                  0x000000ff
 #define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT                 0
-static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
 }
 #define A6XX_SP_FS_MRT_REG_COLOR_SINT                          0x00000100
 #define A6XX_SP_FS_MRT_REG_COLOR_UINT                          0x00000200
 
-#define REG_A6XX_SP_UNKNOWN_A99E                               0x0000a99e
+#define REG_A6XX_SP_FS_PREFETCH_CNTL                           0x0000a99e
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK                   0x00000007
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT                  0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK3                          0x00000008
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK                    0x00000ff0
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT                   4
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK                      0x0000007f
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT                     0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK                  0x00000780
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT                 7
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK                   0x0000f800
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT                  11
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK                      0x003f0000
+#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT                     16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK                   0x03c00000
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT                  22
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_HALF                           0x04000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK                      0xf8000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT                     27
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK         0x000000ff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT                0
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK          0x00ff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT         16
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
+}
 
 #define REG_A6XX_SP_FS_TEX_COUNT                               0x0000a9a7
 
 #define REG_A6XX_SP_UNKNOWN_A9A8                               0x0000a9a8
 
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1                            0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK           0x00000001
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT          0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K(uint32_t val)
+{
+       return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK;
+}
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B3                            0x0000a9b3
+
+#define REG_A6XX_SP_CS_TEX_COUNT                               0x0000a9ba
+
 #define REG_A6XX_SP_FS_TEX_SAMP_LO                             0x0000a9e0
 
 #define REG_A6XX_SP_FS_TEX_SAMP_HI                             0x0000a9e1
@@ -4669,6 +6317,10 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
 
 #define REG_A6XX_SP_CS_TEX_CONST_HI                            0x0000a9e7
 
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
 static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
 
 static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
@@ -4706,6 +6358,7 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
        return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A6XX_SP_CS_CTRL_REG0_VARYING                           0x00400000
+#define A6XX_SP_CS_CTRL_REG0_DIFF_FINE                         0x00800000
 #define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE                      0x04000000
 #define A6XX_SP_CS_CTRL_REG0_MERGEDREGS                                0x80000000
 
@@ -4713,11 +6366,46 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 
 #define REG_A6XX_SP_CS_OBJ_START_HI                            0x0000a9b5
 
+#define REG_A6XX_SP_CS_CONFIG                                  0x0000a9bb
+#define A6XX_SP_CS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_CS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_CS_CONFIG_BINDLESS_UBO                         0x00000008
+#define A6XX_SP_CS_CONFIG_ENABLED                              0x00000100
+#define A6XX_SP_CS_CONFIG_NTEX__MASK                           0x0001fe00
+#define A6XX_SP_CS_CONFIG_NTEX__SHIFT                          9
+static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
+{
+       return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NSAMP__MASK                          0x003e0000
+#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT                         17
+static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
+{
+       return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
+}
+
 #define REG_A6XX_SP_CS_INSTRLEN                                        0x0000a9bc
 
+#define REG_A6XX_SP_CS_IBO_LO                                  0x0000a9f2
+
+#define REG_A6XX_SP_CS_IBO_HI                                  0x0000a9f3
+
+#define REG_A6XX_SP_CS_IBO_COUNT                               0x0000aa00
+
 #define REG_A6XX_SP_UNKNOWN_AB00                               0x0000ab00
 
 #define REG_A6XX_SP_FS_CONFIG                                  0x0000ab04
+#define A6XX_SP_FS_CONFIG_BINDLESS_TEX                         0x00000001
+#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP                                0x00000002
+#define A6XX_SP_FS_CONFIG_BINDLESS_IBO                         0x00000004
+#define A6XX_SP_FS_CONFIG_BINDLESS_UBO                         0x00000008
 #define A6XX_SP_FS_CONFIG_ENABLED                              0x00000100
 #define A6XX_SP_FS_CONFIG_NTEX__MASK                           0x0001fe00
 #define A6XX_SP_FS_CONFIG_NTEX__SHIFT                          9
@@ -4725,18 +6413,48 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
 {
        return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
 }
-#define A6XX_SP_FS_CONFIG_NSAMP__MASK                          0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK                          0x003e0000
 #define A6XX_SP_FS_CONFIG_NSAMP__SHIFT                         17
 static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
 {
        return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
 }
+#define A6XX_SP_FS_CONFIG_NIBO__MASK                           0x3fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__SHIFT                          22
+static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
+{
+       return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
+}
 
 #define REG_A6XX_SP_FS_INSTRLEN                                        0x0000ab05
 
-#define REG_A6XX_SP_UNKNOWN_AB20                               0x0000ab20
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
 
-#define REG_A6XX_SP_UNKNOWN_ACC0                               0x0000acc0
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+#define REG_A6XX_SP_IBO_LO                                     0x0000ab1a
+
+#define REG_A6XX_SP_IBO_HI                                     0x0000ab1b
+
+#define REG_A6XX_SP_IBO_COUNT                                  0x0000ab20
+
+#define REG_A6XX_SP_2D_DST_FORMAT                              0x0000acc0
+#define A6XX_SP_2D_DST_FORMAT_NORM                             0x00000001
+#define A6XX_SP_2D_DST_FORMAT_SINT                             0x00000002
+#define A6XX_SP_2D_DST_FORMAT_UINT                             0x00000004
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK               0x000007f8
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT              3
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+       return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_2D_DST_FORMAT_SRGB                             0x00000800
+#define A6XX_SP_2D_DST_FORMAT_MASK__MASK                       0x0000f000
+#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT                      12
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+       return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
 
 #define REG_A6XX_SP_UNKNOWN_AE00                               0x0000ae00
 
@@ -4746,6 +6464,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
 
 #define REG_A6XX_SP_UNKNOWN_AE0F                               0x0000ae0f
 
+#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR               0x0000b180
+
 #define REG_A6XX_SP_UNKNOWN_B182                               0x0000b182
 
 #define REG_A6XX_SP_UNKNOWN_B183                               0x0000b183
@@ -4767,18 +6487,122 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
 }
 #define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE                 0x00000004
 
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR                  0x0000b302
+
 #define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO               0x0000b302
 
 #define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI               0x0000b303
 
-#define REG_A6XX_SP_TP_UNKNOWN_B304                            0x0000b304
+#define REG_A6XX_SP_TP_SAMPLE_CONFIG                           0x0000b304
+#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0                          0x00000001
+#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE               0x00000002
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0                       0x0000b305
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK          0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT         0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK          0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT         4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK          0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT         8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK          0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT         12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK          0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT         16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK          0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT         20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK          0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT         24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK          0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT         28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1                       0x0000b306
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK          0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT         0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK          0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT         4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK          0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT         8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK          0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT         12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK          0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT         16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK          0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT         20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK          0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT         24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK          0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT         28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
 
 #define REG_A6XX_SP_TP_UNKNOWN_B309                            0x0000b309
 
 #define REG_A6XX_SP_PS_2D_SRC_INFO                             0x0000b4c0
 #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK              0x000000ff
 #define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT             0
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
 {
        return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
 }
@@ -4795,7 +6619,17 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
        return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
 }
 #define A6XX_SP_PS_2D_SRC_INFO_FLAGS                           0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_SRGB                            0x00002000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK                   0x0000c000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT                  14
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
 #define A6XX_SP_PS_2D_SRC_INFO_FILTER                          0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE                 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK20                           0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK22                           0x00400000
 
 #define REG_A6XX_SP_PS_2D_SRC_SIZE                             0x0000b4c1
 #define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK                     0x00007fff
@@ -4815,6 +6649,8 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
 
 #define REG_A6XX_SP_PS_2D_SRC_HI                               0x0000b4c3
 
+#define REG_A6XX_SP_PS_2D_SRC                                  0x0000b4c2
+
 #define REG_A6XX_SP_PS_2D_SRC_PITCH                            0x0000b4c4
 #define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK                    0x01fffe00
 #define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT                   9
@@ -4827,6 +6663,22 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
 
 #define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI                         0x0000b4cb
 
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS                            0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH                      0x0000b4cc
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK              0x000007ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT             0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK                0x003ff800
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT       11
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 7) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK;
+}
+
 #define REG_A6XX_SP_UNKNOWN_B600                               0x0000b600
 
 #define REG_A6XX_SP_UNKNOWN_B605                               0x0000b605
@@ -4838,6 +6690,7 @@ static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
 {
        return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
 }
+#define A6XX_HLSQ_VS_CNTL_ENABLED                              0x00000100
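Note that some packers pre-scale their argument: the CONSTLEN helpers shift the value right by two before applying the mask, so the caller passes the unscaled length and the field stores it in units of four. A sketch, assuming a hypothetical constlen variable:

	uint32_t hlsq_vs_cntl = A6XX_HLSQ_VS_CNTL_CONSTLEN(constlen) | /* constlen is hypothetical */
				A6XX_HLSQ_VS_CNTL_ENABLED;
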
 
 #define REG_A6XX_HLSQ_HS_CNTL                                  0x0000b801
 #define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK                       0x000000ff
@@ -4846,6 +6699,7 @@ static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
 {
        return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
 }
+#define A6XX_HLSQ_HS_CNTL_ENABLED                              0x00000100
 
 #define REG_A6XX_HLSQ_DS_CNTL                                  0x0000b802
 #define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK                       0x000000ff
@@ -4854,6 +6708,7 @@ static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
 {
        return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
 }
+#define A6XX_HLSQ_DS_CNTL_ENABLED                              0x00000100
 
 #define REG_A6XX_HLSQ_GS_CNTL                                  0x0000b803
 #define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK                       0x000000ff
@@ -4862,6 +6717,13 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
 {
        return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
 }
+#define A6XX_HLSQ_GS_CNTL_ENABLED                              0x00000100
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD                      0x0000b820
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR             0x0000b821
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA                     0x0000b823
 
 #define REG_A6XX_HLSQ_UNKNOWN_B980                             0x0000b980
 
@@ -4886,16 +6748,52 @@ static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
 {
        return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
 }
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK                     0xff000000
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT                    24
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
 
 #define REG_A6XX_HLSQ_CONTROL_3_REG                            0x0000b984
-#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK         0x000000ff
-#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT                0
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK           0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT          0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK          0x0000ff00
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT         8
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK                0x00ff0000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT       16
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK       0xff000000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT      24
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
 {
-       return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+       return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
 }
 
 #define REG_A6XX_HLSQ_CONTROL_4_REG                            0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK          0x000000ff
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT         0
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK         0x0000ff00
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT                8
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
 #define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK             0x00ff0000
 #define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT            16
 static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
@@ -4911,6 +6809,15 @@ static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
 
 #define REG_A6XX_HLSQ_CONTROL_5_REG                            0x0000b986
 
+#define REG_A6XX_HLSQ_CS_CNTL                                  0x0000b987
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK                       0x000000ff
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT                      0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+{
+       return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_ENABLED                              0x00000100
+
 #define REG_A6XX_HLSQ_CS_NDRANGE_0                             0x0000b990
 #define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK                 0x00000003
 #define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT                        0
@@ -5011,13 +6918,77 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
        return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
 }
 
+#define REG_A6XX_HLSQ_CS_UNKNOWN_B998                          0x0000b998
+
 #define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X                                0x0000b999
 
 #define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y                                0x0000b99a
 
 #define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z                                0x0000b99b
 
-#define REG_A6XX_HLSQ_UPDATE_CNTL                              0x0000bb08
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD                      0x0000b9a0
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR             0x0000b9a1
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA                     0x0000b9a3
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_DRAW_CMD                                 0x0000bb00
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK                      0x000000ff
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT                     0
+static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_DISPATCH_CMD                             0x0000bb01
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK                  0x000000ff
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT                 0
+static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_EVENT_CMD                                        0x0000bb02
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK                     0x00ff0000
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT                    16
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK                                0x0000007f
+#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT                       0
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+       return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_INVALIDATE_CMD                           0x0000bb08
+#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE                      0x00000001
+#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE                      0x00000002
+#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE                      0x00000004
+#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE                      0x00000008
+#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE                      0x00000010
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE                      0x00000020
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO                                0x00000040
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO                       0x00000080
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST               0x00080000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST              0x00000100
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK             0x00003e00
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT            9
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
+}
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK            0x0007c000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT           14
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
+}
 
 #define REG_A6XX_HLSQ_FS_CNTL                                  0x0000bb10
 #define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK                       0x000000ff
@@ -5026,8 +6997,28 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
 {
        return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
 }
+#define A6XX_HLSQ_FS_CNTL_ENABLED                              0x00000100
 
-#define REG_A6XX_HLSQ_UNKNOWN_BB11                             0x0000bb11
+#define REG_A6XX_HLSQ_SHARED_CONSTS                            0x0000bb11
+#define A6XX_HLSQ_SHARED_CONSTS_ENABLE                         0x00000001
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_2D_EVENT_CMD                             0x0000bd80
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK                  0x0000ff00
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT                 8
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK                     0x0000007f
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT                    0
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+       return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK;
+}
 
 #define REG_A6XX_HLSQ_UNKNOWN_BE00                             0x0000be00
 
@@ -5035,6 +7026,38 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
 
 #define REG_A6XX_HLSQ_UNKNOWN_BE04                             0x0000be04
 
+#define REG_A6XX_CP_EVENT_START                                        0x0000d600
+#define A6XX_CP_EVENT_START_STATE_ID__MASK                     0x000000ff
+#define A6XX_CP_EVENT_START_STATE_ID__SHIFT                    0
+static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_EVENT_END                                  0x0000d601
+#define A6XX_CP_EVENT_END_STATE_ID__MASK                       0x000000ff
+#define A6XX_CP_EVENT_END_STATE_ID__SHIFT                      0
+static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_START                             0x0000d700
+#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK                  0x000000ff
+#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT                 0
+static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_END                               0x0000d701
+#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK                    0x000000ff
+#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT                   0
+static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val)
+{
+       return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK;
+}
+
 #define REG_A6XX_TEX_SAMP_0                                    0x00000000
 #define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR                  0x00000001
 #define A6XX_TEX_SAMP_0_XY_MAG__MASK                           0x00000006
@@ -5081,6 +7104,7 @@ static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
 }
 
 #define REG_A6XX_TEX_SAMP_1                                    0x00000001
+#define A6XX_TEX_SAMP_1_UNK0                                   0x00000001
 #define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK                     0x0000000e
 #define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT                    1
 static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
@@ -5104,11 +7128,18 @@ static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
 }
 
 #define REG_A6XX_TEX_SAMP_2                                    0x00000002
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK                    0xfffffff0
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT                   4
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK                   0x00000003
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT                  0
+static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val)
+{
+       return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
+}
+#define A6XX_TEX_SAMP_2_CHROMA_LINEAR                          0x00000020
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK                    0xffffff80
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT                   7
 static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
 {
-       return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+       return ((val >> 7) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
 }
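Note on the hunk above: the new BCOLOR_OFFSET helper pre-shifts its argument by
7 (the field now occupies bits 7..31), so the caller passes a byte offset and
the low seven bits are dropped; only 128-byte-aligned border color offsets are
representable. For example, A6XX_TEX_SAMP_2_BCOLOR_OFFSET(0x1234) evaluates to
0x1234 & 0xffffff80 = 0x1200. The old helper simply left-shifted its argument
by 4, so callers of this macro need to follow the new convention.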
 
 #define REG_A6XX_TEX_SAMP_3                                    0x00000003
@@ -5151,6 +7182,8 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
 {
        return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X                     0x00010000
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y                     0x00040000
 #define A6XX_TEX_CONST_0_SAMPLES__MASK                         0x00300000
 #define A6XX_TEX_CONST_0_SAMPLES__SHIFT                                20
 static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
@@ -5159,7 +7192,7 @@ static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
 }
 #define A6XX_TEX_CONST_0_FMT__MASK                             0x3fc00000
 #define A6XX_TEX_CONST_0_FMT__SHIFT                            22
-static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val)
 {
        return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
 }
@@ -5185,11 +7218,12 @@ static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
 }
 
 #define REG_A6XX_TEX_CONST_2                                   0x00000002
-#define A6XX_TEX_CONST_2_FETCHSIZE__MASK                       0x0000000f
-#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT                      0
-static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+#define A6XX_TEX_CONST_2_UNK4                                  0x00000010
+#define A6XX_TEX_CONST_2_PITCHALIGN__MASK                      0x0000000f
+#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT                     0
+static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
 {
-       return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+       return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK;
 }
 #define A6XX_TEX_CONST_2_PITCH__MASK                           0x1fffff80
 #define A6XX_TEX_CONST_2_PITCH__SHIFT                          7
@@ -5203,6 +7237,7 @@ static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
 {
        return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
 }
+#define A6XX_TEX_CONST_2_UNK31                                 0x80000000
 
 #define REG_A6XX_TEX_CONST_3                                   0x00000003
 #define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK                     0x00003fff
@@ -5211,6 +7246,13 @@ static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
 {
        return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
 }
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK                     0x07800000
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT                    23
+static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+       return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A6XX_TEX_CONST_3_TILE_ALL                              0x08000000
 #define A6XX_TEX_CONST_3_FLAG                                  0x10000000
 
 #define REG_A6XX_TEX_CONST_4                                   0x00000004
@@ -5236,6 +7278,12 @@ static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
 }
 
 #define REG_A6XX_TEX_CONST_6                                   0x00000006
+#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK                     0xffffff00
+#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT                    8
+static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
+{
+       return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK;
+}
 
 #define REG_A6XX_TEX_CONST_7                                   0x00000007
 #define A6XX_TEX_CONST_7_FLAG_LO__MASK                         0xffffffe0
@@ -5254,8 +7302,32 @@ static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
 }
 
 #define REG_A6XX_TEX_CONST_9                                   0x00000009
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK         0x0001ffff
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT                0
+static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
 
 #define REG_A6XX_TEX_CONST_10                                  0x0000000a
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK              0x0000007f
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT             0
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK               0x00000f00
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT              8
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val)
+{
+       return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK               0x0000f000
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT              12
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val)
+{
+       return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK;
+}
 
 #define REG_A6XX_TEX_CONST_11                                  0x0000000b
 
@@ -5267,6 +7339,126 @@ static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
 
 #define REG_A6XX_TEX_CONST_15                                  0x0000000f
 
+#define REG_A6XX_IBO_0                                         0x00000000
+#define A6XX_IBO_0_TILE_MODE__MASK                             0x00000003
+#define A6XX_IBO_0_TILE_MODE__SHIFT                            0
+static inline uint32_t A6XX_IBO_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+       return ((val) << A6XX_IBO_0_TILE_MODE__SHIFT) & A6XX_IBO_0_TILE_MODE__MASK;
+}
+#define A6XX_IBO_0_FMT__MASK                                   0x3fc00000
+#define A6XX_IBO_0_FMT__SHIFT                                  22
+static inline uint32_t A6XX_IBO_0_FMT(enum a6xx_format val)
+{
+       return ((val) << A6XX_IBO_0_FMT__SHIFT) & A6XX_IBO_0_FMT__MASK;
+}
+
+#define REG_A6XX_IBO_1                                         0x00000001
+#define A6XX_IBO_1_WIDTH__MASK                                 0x00007fff
+#define A6XX_IBO_1_WIDTH__SHIFT                                        0
+static inline uint32_t A6XX_IBO_1_WIDTH(uint32_t val)
+{
+       return ((val) << A6XX_IBO_1_WIDTH__SHIFT) & A6XX_IBO_1_WIDTH__MASK;
+}
+#define A6XX_IBO_1_HEIGHT__MASK                                        0x3fff8000
+#define A6XX_IBO_1_HEIGHT__SHIFT                               15
+static inline uint32_t A6XX_IBO_1_HEIGHT(uint32_t val)
+{
+       return ((val) << A6XX_IBO_1_HEIGHT__SHIFT) & A6XX_IBO_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_IBO_2                                         0x00000002
+#define A6XX_IBO_2_UNK4                                                0x00000010
+#define A6XX_IBO_2_PITCH__MASK                                 0x1fffff80
+#define A6XX_IBO_2_PITCH__SHIFT                                        7
+static inline uint32_t A6XX_IBO_2_PITCH(uint32_t val)
+{
+       return ((val) << A6XX_IBO_2_PITCH__SHIFT) & A6XX_IBO_2_PITCH__MASK;
+}
+#define A6XX_IBO_2_TYPE__MASK                                  0x60000000
+#define A6XX_IBO_2_TYPE__SHIFT                                 29
+static inline uint32_t A6XX_IBO_2_TYPE(enum a6xx_tex_type val)
+{
+       return ((val) << A6XX_IBO_2_TYPE__SHIFT) & A6XX_IBO_2_TYPE__MASK;
+}
+#define A6XX_IBO_2_UNK31                                       0x80000000
+
+#define REG_A6XX_IBO_3                                         0x00000003
+#define A6XX_IBO_3_ARRAY_PITCH__MASK                           0x00003fff
+#define A6XX_IBO_3_ARRAY_PITCH__SHIFT                          0
+static inline uint32_t A6XX_IBO_3_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 12) << A6XX_IBO_3_ARRAY_PITCH__SHIFT) & A6XX_IBO_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_IBO_3_UNK27                                       0x08000000
+#define A6XX_IBO_3_FLAG                                                0x10000000
+
+#define REG_A6XX_IBO_4                                         0x00000004
+#define A6XX_IBO_4_BASE_LO__MASK                               0xffffffff
+#define A6XX_IBO_4_BASE_LO__SHIFT                              0
+static inline uint32_t A6XX_IBO_4_BASE_LO(uint32_t val)
+{
+       return ((val) << A6XX_IBO_4_BASE_LO__SHIFT) & A6XX_IBO_4_BASE_LO__MASK;
+}
+
+#define REG_A6XX_IBO_5                                         0x00000005
+#define A6XX_IBO_5_BASE_HI__MASK                               0x0001ffff
+#define A6XX_IBO_5_BASE_HI__SHIFT                              0
+static inline uint32_t A6XX_IBO_5_BASE_HI(uint32_t val)
+{
+       return ((val) << A6XX_IBO_5_BASE_HI__SHIFT) & A6XX_IBO_5_BASE_HI__MASK;
+}
+#define A6XX_IBO_5_DEPTH__MASK                                 0x3ffe0000
+#define A6XX_IBO_5_DEPTH__SHIFT                                        17
+static inline uint32_t A6XX_IBO_5_DEPTH(uint32_t val)
+{
+       return ((val) << A6XX_IBO_5_DEPTH__SHIFT) & A6XX_IBO_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_IBO_6                                         0x00000006
+
+#define REG_A6XX_IBO_7                                         0x00000007
+
+#define REG_A6XX_IBO_8                                         0x00000008
+
+#define REG_A6XX_IBO_9                                         0x00000009
+#define A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__MASK               0x0001ffff
+#define A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT              0
+static inline uint32_t A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 4) << A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_IBO_10                                                0x0000000a
+#define A6XX_IBO_10_FLAG_BUFFER_PITCH__MASK                    0x0000007f
+#define A6XX_IBO_10_FLAG_BUFFER_PITCH__SHIFT                   0
+static inline uint32_t A6XX_IBO_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_IBO_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_IBO_10_FLAG_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_UBO_0                                         0x00000000
+#define A6XX_UBO_0_BASE_LO__MASK                               0xffffffff
+#define A6XX_UBO_0_BASE_LO__SHIFT                              0
+static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val)
+{
+       return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A6XX_UBO_1                                         0x00000001
+#define A6XX_UBO_1_BASE_HI__MASK                               0x0001ffff
+#define A6XX_UBO_1_BASE_HI__SHIFT                              0
+static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val)
+{
+       return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK;
+}
+#define A6XX_UBO_1_SIZE__MASK                                  0xfffe0000
+#define A6XX_UBO_1_SIZE__SHIFT                                 17
+static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val)
+{
+       return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK;
+}
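A sketch (not from the patch) of how the new UBO descriptor helpers would be
used to fill in the two descriptor dwords; the variable names and the unit of
SIZE are assumptions, only the field layout comes from the definitions above:

	uint32_t desc[2];

	desc[0] = A6XX_UBO_0_BASE_LO(lower_32_bits(iova));
	desc[1] = A6XX_UBO_1_BASE_HI(upper_32_bits(iova)) |
		  A6XX_UBO_1_SIZE(size);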
+
 #define REG_A6XX_PDC_GPU_ENABLE_PDC                            0x00001140
 
 #define REG_A6XX_PDC_GPU_SEQ_START_ADDR                                0x00001148
index 096be97..b67b38c 100644 (file)
@@ -103,17 +103,45 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
                A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
 }
 
-static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
-       struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
-       struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-       struct msm_gpu *gpu = &adreno_gpu->base;
-       int ret;
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+       u32 perf_index;
+       unsigned long gpu_freq;
+       int ret = 0;
+
+       gpu_freq = dev_pm_opp_get_freq(opp);
+
+       if (gpu_freq == gmu->freq)
+               return;
+
+       for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+               if (gpu_freq == gmu->gpu_freqs[perf_index])
+                       break;
+
+       gmu->current_perf_index = perf_index;
+       gmu->freq = gmu->gpu_freqs[perf_index];
+
+       /*
+        * This can get called from devfreq while the hardware is idle. Don't
+        * bring up the power if it isn't already active.
+        */
+       if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+               return;
+
+       if (!gmu->legacy) {
+               a6xx_hfi_set_freq(gmu, perf_index);
+               icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+               pm_runtime_put(gmu->dev);
+               return;
+       }
 
        gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
 
        gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
-               ((3 & 0xf) << 28) | index);
+                       ((3 & 0xf) << 28) | perf_index);
 
        /*
         * Send an invalid index as a vote for the bus bandwidth and let the
@@ -134,37 +162,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
         * for now leave it at max so that the performance is nominal.
         */
        icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
-}
-
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-       struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-       u32 perf_index = 0;
-
-       if (freq == gmu->freq)
-               return;
-
-       for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
-               if (freq == gmu->gpu_freqs[perf_index])
-                       break;
-
-       gmu->current_perf_index = perf_index;
-       gmu->freq = gmu->gpu_freqs[perf_index];
-
-       /*
-        * This can get called from devfreq while the hardware is idle. Don't
-        * bring up the power if it isn't already active
-        */
-       if (pm_runtime_get_if_in_use(gmu->dev) == 0)
-               return;
-
-       if (gmu->legacy)
-               __a6xx_gmu_set_freq(gmu, perf_index);
-       else
-               a6xx_hfi_set_freq(gmu, perf_index);
-
        pm_runtime_put(gmu->dev);
 }
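For context, the reworked a6xx_gmu_set_freq() above is reached from the msm
core's devfreq callback, which resolves the requested rate to an OPP and hands
that OPP to the GPU-specific hook. A rough sketch of such a caller, assuming
the hook is exposed through the funcs table as gpu_set_freq (the surrounding
msm_gpu code is not part of this hunk):

	static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
	{
		struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
		struct dev_pm_opp *opp;

		/* Pick the OPP that devfreq recommends for the requested rate */
		opp = devfreq_recommended_opp(dev, freq, flags);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		gpu->funcs->gpu_set_freq(gpu, opp);
		dev_pm_opp_put(opp);

		return 0;
	}

This is also why the function above only takes the runtime PM reference with
pm_runtime_get_if_in_use(): devfreq may call in while the hardware is idle.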
 
@@ -839,6 +836,19 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
        a6xx_gmu_rpmh_off(gmu);
 }
 
+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+       struct dev_pm_opp *gpu_opp;
+       unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+       gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+       if (IS_ERR_OR_NULL(gpu_opp))
+               return;
+
+       a6xx_gmu_set_freq(gpu, gpu_opp);
+       dev_pm_opp_put(gpu_opp);
+}
+
 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 {
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -854,10 +864,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
        /* Turn on the resources */
        pm_runtime_get_sync(gmu->dev);
 
+       /*
+        * "enable" the GX power domain which won't actually do anything but it
+        * will make sure that the refcounting is correct in case we need to
+        * bring down the GX after a GMU failure
+        */
+       if (!IS_ERR_OR_NULL(gmu->gxpd))
+               pm_runtime_get_sync(gmu->gxpd);
+
        /* Use a known rate to bring up the GMU */
        clk_set_rate(gmu->core_clk, 200000000);
        ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
        if (ret) {
+               pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
                return ret;
        }
@@ -898,24 +917,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
        enable_irq(gmu->hfi_irq);
 
        /* Set the GPU to the current freq */
-       if (gmu->legacy)
-               __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
-       else
-               a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
-
-       /*
-        * "enable" the GX power domain which won't actually do anything but it
-        * will make sure that the refcounting is correct in case we need to
-        * bring down the GX after a GMU failure
-        */
-       if (!IS_ERR_OR_NULL(gmu->gxpd))
-               pm_runtime_get(gmu->gxpd);
+       a6xx_gmu_set_initial_freq(gpu, gmu);
 
 out:
        /* On failure, shut down the GMU to leave it in a good state */
        if (ret) {
                disable_irq(gmu->gmu_irq);
                a6xx_rpmh_stop(gmu);
+               pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
        }
 
@@ -1121,7 +1130,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
                return -ENODEV;
 
        mmu = msm_iommu_new(gmu->dev, domain);
-       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
        if (IS_ERR(gmu->aspace)) {
                iommu_domain_free(domain);
                return PTR_ERR(gmu->aspace);
index 47df474..c6d2bce 100644 (file)
@@ -127,6 +127,11 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
        readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
                interval, timeout)
 
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+       return msm_readl(gmu->rscc + (offset << 2));
+}
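With both RSCC accessors in place, a read-modify-write over the RSCC block can
be composed the same way the existing gmu_rmw() does for the main GMU range; a
hypothetical helper (not part of this patch) would look like:

	static inline void gmu_rmw_rscc(struct a6xx_gmu *gmu, u32 reg,
		u32 mask, u32 or)
	{
		u32 val = gmu_read_rscc(gmu, reg);

		val &= ~mask;

		gmu_write_rscc(gmu, reg, val | or);
	}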
+
 static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
 {
        return msm_writel(value, gmu->rscc + (offset << 2));
index 176ae94..5a43d30 100644 (file)
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -46,24 +48,109 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 
 
-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB                  0x00800000
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB                0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK                     0x00400000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK                   0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK                   0x40000000
-#define A6XX_GMU_OOB_DCVS_SET_MASK                             0x00800000
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK                           0x80000000
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK                           0x80000000
-#define A6XX_GMU_OOB_GPU_SET_MASK                              0x00040000
-#define A6XX_GMU_OOB_GPU_CHECK_MASK                            0x04000000
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK                            0x04000000
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK                         0x00020000
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK                       0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK                       0x02000000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK            0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT           23
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
+{
+       return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK  0x40000000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
+{
+       return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK               0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT              22
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK             0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT            30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK             0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT            30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK                       0x00800000
+#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT                      23
+static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK                     0x80000000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT                    31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK                     0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT                    31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_SET_MASK__MASK                                0x00040000
+#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT                       18
+static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK                      0x04000000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT                     26
+static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK                      0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT                     26
+static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK                   0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT                  17
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK                 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT                        25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK                 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT                        25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
+{
+       return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
+}
 #define A6XX_HFI_IRQ_MSGQ_MASK                                 0x00000001
-#define A6XX_HFI_IRQ_DSGQ_MASK                                 0x00000002
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK                          0x00000004
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK                            0x00800000
+#define A6XX_HFI_IRQ_DSGQ_MASK__MASK                           0x00000002
+#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT                          1
+static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
+{
+       return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK                    0x00000004
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT                   2
+static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
+{
+       return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK                      0x00800000
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT                     23
+static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
+{
+       return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
+}
 #define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK                                0x007f0000
 #define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT                       16
 static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
index 68314dc..c5a3e4d 100644 (file)
@@ -74,7 +74,9 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
                u64 iova)
 {
        OUT_PKT7(ring, CP_REG_TO_MEM, 3);
-       OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+       OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+               CP_REG_TO_MEM_0_CNT(2) |
+               CP_REG_TO_MEM_0_64B);
        OUT_RING(ring, lower_32_bits(iova));
        OUT_RING(ring, upper_32_bits(iova));
 }
@@ -102,10 +104,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        /* Invalidate CCU depth and color */
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
-       OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+       OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
 
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
-       OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+       OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
 
        /* Submit the commands */
        for (i = 0; i < submit->nr_cmds; i++) {
@@ -139,7 +141,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
         * timestamp is written to the memory and then triggers the interrupt
         */
        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
-       OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+       OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+               CP_EVENT_WRITE_0_IRQ);
        OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);
@@ -151,10 +154,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        a6xx_flush(gpu, ring);
 }
 
-static const struct {
-       u32 offset;
-       u32 value;
-} a6xx_hwcg[] = {
+const struct adreno_reglist a630_hwcg[] = {
        {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
@@ -259,7 +259,114 @@ static const struct {
        {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
        {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
-       {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+       {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+       {},
+};
+
+const struct adreno_reglist a640_hwcg[] = {
+       {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+       {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+       {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+       {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+       {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+       {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+       {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+       {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+       {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+       {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+       {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+       {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+       {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+       {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+       {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+       {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+       {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+       {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+       {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+       {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+       {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+       {},
+};
+
+const struct adreno_reglist a650_hwcg[] = {
+       {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+       {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+       {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+       {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+       {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+       {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+       {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+       {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+       {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+       {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+       {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+       {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+       {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+       {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+       {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+       {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+       {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+       {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+       {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+       {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+       {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+       {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+       {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+       {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+       {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+       {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+       {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+       {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+       {},
 };
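Each table is terminated by the empty {} sentinel so the programming loop in
a6xx_set_hwcg() below can walk it without a separate length. A sketch of how a
GPU catalog entry would reference one of these tables through the new
adreno_info::hwcg pointer (the other field names shown are assumptions, not
taken from this hunk):

	static const struct adreno_info a650_info = {
		.revn = 650,
		.hwcg = a650_hwcg,
	};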
 
 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
@@ -267,26 +374,65 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+       const struct adreno_reglist *reg;
        unsigned int i;
-       u32 val;
+       u32 val, clock_cntl_on;
+
+       if (!adreno_gpu->info->hwcg)
+               return;
+
+       if (adreno_is_a630(adreno_gpu))
+               clock_cntl_on = 0x8aa8aa02;
+       else
+               clock_cntl_on = 0x8aa8aa82;
 
        val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
 
        /* Don't re-program the registers if they are already correct */
-       if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+       if ((!state && !val) || (state && (val == clock_cntl_on)))
                return;
 
        /* Disable SP clock before programming HWCG registers */
        gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
-       for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
-               gpu_write(gpu, a6xx_hwcg[i].offset,
-                       state ? a6xx_hwcg[i].value : 0);
+       for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
+               gpu_write(gpu, reg->offset, state ? reg->value : 0);
 
        /* Enable SP clock */
        gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
-       gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+       gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       u32 lower_bit = 2;
+       u32 amsbc = 0;
+       u32 rgb565_predicator = 0;
+       u32 uavflagprd_inv = 0;
+
+       /* a618 is using the hw default values */
+       if (adreno_is_a618(adreno_gpu))
+               return;
+
+       if (adreno_is_a640(adreno_gpu))
+               amsbc = 1;
+
+       if (adreno_is_a650(adreno_gpu)) {
+               /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
+               lower_bit = 3;
+               amsbc = 1;
+               rgb565_predicator = 1;
+               uavflagprd_inv = 2;
+       }
+
+       gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+               rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+       gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+       gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+               uavflagprd_inv >> 4 | lower_bit << 1);
+       gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
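Worked example of the packing above for a650 (rgb565_predicator = 1, amsbc = 1,
lower_bit = 3): RB_NC_MODE_CNTL is written with (1 << 11) | (1 << 4) | (3 << 1)
= 0x816, TPL1_NC_MODE_CNTL with 3 << 1 = 0x6 and UCHE_MODE_CNTL with 3 << 21 =
0x00600000, while a618 keeps the hardware reset values because the function
returns early.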
 
 static int a6xx_cp_init(struct msm_gpu *gpu)
@@ -406,12 +552,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
 
-       /*
-        * enable hardware clockgating
-        * For now enable clock gating only for a630
-        */
-       if (adreno_is_a630(adreno_gpu))
-               a6xx_set_hwcg(gpu, true);
+       /* enable hardware clockgating */
+       a6xx_set_hwcg(gpu, true);
 
        /* VBIF/GBIF start*/
        if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
@@ -478,12 +620,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        /* Select CP0 to always count cycles */
        gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-       if (adreno_is_a630(adreno_gpu)) {
-               gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
-               gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
-               gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
-               gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
-       }
+       a6xx_set_ubwc_config(gpu);
 
        /* Enable fault detection */
        gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -893,8 +1030,8 @@ static const struct adreno_gpu_funcs funcs = {
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
-               .create_address_space = adreno_iommu_create_address_space,
 #endif
+               .create_address_space = adreno_iommu_create_address_space,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
index 7239b8b..03ba60d 100644 (file)
@@ -63,7 +63,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
 
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp);
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
 
 void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
index d6023ba..959656a 100644 (file)
@@ -736,7 +736,8 @@ static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
 static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
                struct a6xx_gpu_state *a6xx_state,
                const struct a6xx_registers *regs,
-               struct a6xx_gpu_state_obj *obj)
+               struct a6xx_gpu_state_obj *obj,
+               bool rscc)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -755,9 +756,17 @@ static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
                u32 count = RANGE(regs->registers, i);
                int j;
 
-               for (j = 0; j < count; j++)
-                       obj->data[index++] = gmu_read(gmu,
-                               regs->registers[i] + j);
+               for (j = 0; j < count; j++) {
+                       u32 offset = regs->registers[i] + j;
+                       u32 val;
+
+                       if (rscc)
+                               val = gmu_read_rscc(gmu, offset);
+                       else
+                               val = gmu_read(gmu, offset);
+
+                       obj->data[index++] = val;
+               }
        }
 }
 
@@ -777,7 +786,9 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
 
        /* Get the CX GMU registers from AHB */
        _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
-               &a6xx_state->gmu_registers[0]);
+               &a6xx_state->gmu_registers[0], false);
+       _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+               &a6xx_state->gmu_registers[1], true);
 
        if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
                return;
@@ -785,8 +796,8 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
        /* Set the fence to ALLOW mode so we can access the registers */
        gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 
-       _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
-               &a6xx_state->gmu_registers[1]);
+       _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+               &a6xx_state->gmu_registers[2], false);
 }
 
 #define A6XX_GBIF_REGLIST_SIZE   1
index 24c974c..846fd5b 100644 (file)
@@ -341,10 +341,6 @@ static const u32 a6xx_gmu_cx_registers[] = {
        0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
        0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
        0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
-       /* GPU RSCC */
-       0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
-       0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
-       0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
        /* GMU AO */
        0x9300, 0x9316, 0x9400, 0x9400,
        /* GPU CC */
@@ -357,8 +353,16 @@ static const u32 a6xx_gmu_cx_registers[] = {
        0xbc00, 0xbc16, 0xbc20, 0xbc27,
 };
 
+static const u32 a6xx_gmu_cx_rscc_registers[] = {
+       /* GPU RSCC */
+       0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
+       0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497,
+       0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f,
+};
+
 static const struct a6xx_registers a6xx_gmu_reglist[] = {
        REGS(a6xx_gmu_cx_registers, 0, 0),
+       REGS(a6xx_gmu_cx_rscc_registers, 0, 0),
        REGS(a6xx_gmu_gx_registers, 0, 0),
 };
 
index 9921e63..ccd44d0 100644 (file)
@@ -281,6 +281,76 @@ static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
        msg->cnoc_cmds_data[1][0] =  0x60000001;
 }
 
+static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+       /*
+        * Send a single "off" entry just to get things running
+        * TODO: bus scaling
+        */
+       msg->bw_level_num = 1;
+
+       msg->ddr_cmds_num = 3;
+       msg->ddr_wait_bitmask = 0x01;
+
+       msg->ddr_cmds_addrs[0] = 0x50000;
+       msg->ddr_cmds_addrs[1] = 0x5003c;
+       msg->ddr_cmds_addrs[2] = 0x5000c;
+
+       msg->ddr_cmds_data[0][0] =  0x40000000;
+       msg->ddr_cmds_data[0][1] =  0x40000000;
+       msg->ddr_cmds_data[0][2] =  0x40000000;
+
+       /*
+        * These are the CX (CNOC) votes - these are used by the GMU but the
+        * votes are known and fixed for the target
+        */
+       msg->cnoc_cmds_num = 3;
+       msg->cnoc_wait_bitmask = 0x01;
+
+       msg->cnoc_cmds_addrs[0] = 0x50034;
+       msg->cnoc_cmds_addrs[1] = 0x5007c;
+       msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+       msg->cnoc_cmds_data[0][0] =  0x40000000;
+       msg->cnoc_cmds_data[0][1] =  0x00000000;
+       msg->cnoc_cmds_data[0][2] =  0x40000000;
+
+       msg->cnoc_cmds_data[1][0] =  0x60000001;
+       msg->cnoc_cmds_data[1][1] =  0x20000001;
+       msg->cnoc_cmds_data[1][2] =  0x60000001;
+}
+
+static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+       /*
+        * Send a single "off" entry just to get things running
+        * TODO: bus scaling
+        */
+       msg->bw_level_num = 1;
+
+       msg->ddr_cmds_num = 3;
+       msg->ddr_wait_bitmask = 0x01;
+
+       msg->ddr_cmds_addrs[0] = 0x50000;
+       msg->ddr_cmds_addrs[1] = 0x50004;
+       msg->ddr_cmds_addrs[2] = 0x5007c;
+
+       msg->ddr_cmds_data[0][0] =  0x40000000;
+       msg->ddr_cmds_data[0][1] =  0x40000000;
+       msg->ddr_cmds_data[0][2] =  0x40000000;
+
+       /*
+        * These are the CX (CNOC) votes - these are used by the GMU but the
+        * votes are known and fixed for the target
+        */
+       msg->cnoc_cmds_num = 1;
+       msg->cnoc_wait_bitmask = 0x01;
+
+       msg->cnoc_cmds_addrs[0] = 0x500a4;
+       msg->cnoc_cmds_data[0][0] =  0x40000000;
+       msg->cnoc_cmds_data[1][0] =  0x60000001;
+}
+
 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
        /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -327,6 +397,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
 
        if (adreno_is_a618(adreno_gpu))
                a618_build_bw_table(&msg);
+       else if (adreno_is_a640(adreno_gpu))
+               a640_build_bw_table(&msg);
+       else if (adreno_is_a650(adreno_gpu))
+               a650_build_bw_table(&msg);
        else
                a6xx_build_bw_table(&msg);
 
index 641d3ba..548f532 100644 (file)
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -159,6 +161,7 @@ enum a3xx_msaa_samples {
        MSAA_ONE = 0,
        MSAA_TWO = 1,
        MSAA_FOUR = 2,
+       MSAA_EIGHT = 3,
 };
 
 enum a3xx_threadmode {
@@ -197,6 +200,11 @@ enum a4xx_tess_spacing {
        EVEN_SPACING = 3,
 };
 
+enum a5xx_address_mode {
+       ADDR_32B = 0,
+       ADDR_64B = 1,
+};
+
 #define REG_AXXX_CP_RB_BASE                                    0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL                                    0x000001c1
@@ -446,34 +454,174 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
 #define REG_AXXX_CP_IB2_BUFSZ                                  0x0000045b
 
 #define REG_AXXX_CP_STAT                                       0x0000047f
-#define AXXX_CP_STAT_CP_BUSY                                   0x80000000
-#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY                                0x40000000
-#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY                                0x20000000
-#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY                                0x10000000
-#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY                                0x08000000
-#define AXXX_CP_STAT_ME_BUSY                                   0x04000000
-#define AXXX_CP_STAT_MIU_WR_C_BUSY                             0x02000000
-#define AXXX_CP_STAT_CP_3D_BUSY                                        0x00800000
-#define AXXX_CP_STAT_CP_NRT_BUSY                               0x00400000
-#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY                         0x00200000
-#define AXXX_CP_STAT_RCIU_ME_BUSY                              0x00100000
-#define AXXX_CP_STAT_RCIU_PFP_BUSY                             0x00080000
-#define AXXX_CP_STAT_MEQ_RING_BUSY                             0x00040000
-#define AXXX_CP_STAT_PFP_BUSY                                  0x00020000
-#define AXXX_CP_STAT_ST_QUEUE_BUSY                             0x00010000
-#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY                      0x00002000
-#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY                      0x00001000
-#define AXXX_CP_STAT_RING_QUEUE_BUSY                           0x00000800
-#define AXXX_CP_STAT_CSF_BUSY                                  0x00000400
-#define AXXX_CP_STAT_CSF_ST_BUSY                               0x00000200
-#define AXXX_CP_STAT_EVENT_BUSY                                        0x00000100
-#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY                                0x00000080
-#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY                                0x00000040
-#define AXXX_CP_STAT_CSF_RING_BUSY                             0x00000020
-#define AXXX_CP_STAT_RCIU_BUSY                                 0x00000010
-#define AXXX_CP_STAT_RBIU_BUSY                                 0x00000008
-#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY                                0x00000004
-#define AXXX_CP_STAT_MIU_RD_REQ_BUSY                           0x00000002
+#define AXXX_CP_STAT_CP_BUSY__MASK                             0x80000000
+#define AXXX_CP_STAT_CP_BUSY__SHIFT                            31
+static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK                  0x40000000
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT                 30
+static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK                  0x20000000
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT                 29
+static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK                  0x10000000
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT                 28
+static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK                  0x08000000
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT                 27
+static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ME_BUSY__MASK                             0x04000000
+#define AXXX_CP_STAT_ME_BUSY__SHIFT                            26
+static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK                       0x02000000
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT                      25
+static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_3D_BUSY__MASK                          0x00800000
+#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT                         23
+static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_NRT_BUSY__MASK                         0x00400000
+#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT                                22
+static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK                   0x00200000
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT                  21
+static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK                                0x00100000
+#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT                       20
+static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK                       0x00080000
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT                      19
+static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK                       0x00040000
+#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT                      18
+static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PFP_BUSY__MASK                            0x00020000
+#define AXXX_CP_STAT_PFP_BUSY__SHIFT                           17
+static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK                       0x00010000
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT                      16
+static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK                        0x00002000
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT               13
+static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK                        0x00001000
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT               12
+static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK                     0x00000800
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT                    11
+static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_BUSY__MASK                            0x00000400
+#define AXXX_CP_STAT_CSF_BUSY__SHIFT                           10
+static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_ST_BUSY__MASK                         0x00000200
+#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT                                9
+static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK;
+}
+#define AXXX_CP_STAT_EVENT_BUSY__MASK                          0x00000100
+#define AXXX_CP_STAT_EVENT_BUSY__SHIFT                         8
+static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK                  0x00000080
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT                 7
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK                  0x00000040
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT                 6
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_RING_BUSY__MASK                       0x00000020
+#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT                      5
+static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_BUSY__MASK                           0x00000010
+#define AXXX_CP_STAT_RCIU_BUSY__SHIFT                          4
+static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_BUSY__MASK                           0x00000008
+#define AXXX_CP_STAT_RBIU_BUSY__SHIFT                          3
+static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK                  0x00000004
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT                 2
+static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK                     0x00000002
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT                    1
+static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
+{
+       return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK;
+}
 #define AXXX_CP_STAT_MIU_WR_BUSY                               0x00000001
 
 #define REG_AXXX_CP_SCRATCH_REG0                               0x00000578
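
The generated helpers above only pack a field value into its register position; for reading status bits the __MASK/__SHIFT pairs are used directly. A minimal sketch of both directions, assuming the AXXX_CP_STAT definitions above (the helper functions here are hypothetical and not part of this patch):

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helpers, for illustration only. */
static bool cp_core_busy(uint32_t cp_stat)
{
	/* Test the CP_BUSY bit (bit 31) via the generated mask. */
	return cp_stat & AXXX_CP_STAT_CP_BUSY__MASK;
}

static uint32_t cp_stat_field(uint32_t cp_stat, uint32_t mask, uint32_t shift)
{
	/* Generic unpack: the inverse of the packing helpers above. */
	return (cp_stat & mask) >> shift;
}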
index 7732f03..4e84f3c 100644
@@ -200,6 +200,7 @@ static const struct adreno_info gpulist[] = {
                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init = a6xx_gpu_init,
                .zapfw = "a630_zap.mdt",
+               .hwcg = a630_hwcg,
        }, {
                .rev = ADRENO_REV(6, 4, 0, ANY_ID),
                .revn = 640,
@@ -212,6 +213,7 @@ static const struct adreno_info gpulist[] = {
                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init = a6xx_gpu_init,
                .zapfw = "a640_zap.mdt",
+               .hwcg = a640_hwcg,
        }, {
                .rev = ADRENO_REV(6, 5, 0, ANY_ID),
                .revn = 650,
@@ -224,6 +226,7 @@ static const struct adreno_info gpulist[] = {
                .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init = a6xx_gpu_init,
                .zapfw = "a650_zap.mdt",
+               .hwcg = a650_hwcg,
        },
 };
 
index 89673c7..e23641a 100644
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               0xfffffff);
+               0xffffffff - SZ_16M);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
@@ -895,7 +895,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
        return 0;
 }
 
-static int adreno_get_pwrlevels(struct device *dev,
+static void adreno_get_pwrlevels(struct device *dev,
                struct msm_gpu *gpu)
 {
        unsigned long freq = ULONG_MAX;
@@ -930,24 +930,6 @@ static int adreno_get_pwrlevels(struct device *dev,
        }
 
        DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
-
-       /* Check for an interconnect path for the bus */
-       gpu->icc_path = of_icc_get(dev, "gfx-mem");
-       if (!gpu->icc_path) {
-               /*
-                * Keep compatbility with device trees that don't have an
-                * interconnect-names property.
-                */
-               gpu->icc_path = of_icc_get(dev, NULL);
-       }
-       if (IS_ERR(gpu->icc_path))
-               gpu->icc_path = NULL;
-
-       gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
-       if (IS_ERR(gpu->ocmem_icc_path))
-               gpu->ocmem_icc_path = NULL;
-
-       return 0;
 }
 
 int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
@@ -993,9 +975,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *adreno_gpu,
                const struct adreno_gpu_funcs *funcs, int nr_rings)
 {
-       struct adreno_platform_config *config = pdev->dev.platform_data;
+       struct device *dev = &pdev->dev;
+       struct adreno_platform_config *config = dev->platform_data;
        struct msm_gpu_config adreno_gpu_config  = { 0 };
        struct msm_gpu *gpu = &adreno_gpu->base;
+       int ret;
 
        adreno_gpu->funcs = funcs;
        adreno_gpu->info = adreno_info(config->rev);
@@ -1007,27 +991,59 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
        adreno_gpu_config.nr_rings = nr_rings;
 
-       adreno_get_pwrlevels(&pdev->dev, gpu);
+       adreno_get_pwrlevels(dev, gpu);
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev,
+       pm_runtime_set_autosuspend_delay(dev,
                adreno_gpu->info->inactive_period);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_enable(dev);
 
-       return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+       ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
                        adreno_gpu->info->name, &adreno_gpu_config);
+       if (ret)
+               return ret;
+
+       /*
+        * The legacy case, before "interconnect-names", only has a
+        * single interconnect path which is equivalent to "gfx-mem"
+        */
+       if (!of_find_property(dev->of_node, "interconnect-names", NULL)) {
+               gpu->icc_path = of_icc_get(dev, NULL);
+       } else {
+               gpu->icc_path = of_icc_get(dev, "gfx-mem");
+               gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+       }
+
+       if (IS_ERR(gpu->icc_path)) {
+               ret = PTR_ERR(gpu->icc_path);
+               gpu->icc_path = NULL;
+               return ret;
+       }
+
+       if (IS_ERR(gpu->ocmem_icc_path)) {
+               ret = PTR_ERR(gpu->ocmem_icc_path);
+               gpu->ocmem_icc_path = NULL;
+               /* allow -ENODATA, ocmem icc is optional */
+               if (ret != -ENODATA)
+                       return ret;
+       }
+
+       return 0;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
        struct msm_gpu *gpu = &adreno_gpu->base;
+       struct msm_drm_private *priv = gpu->dev->dev_private;
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
                release_firmware(adreno_gpu->fw[i]);
 
-       icc_put(gpu->icc_path);
-       icc_put(gpu->ocmem_icc_path);
+       pm_runtime_disable(&priv->gpu_pdev->dev);
 
        msm_gpu_cleanup(&adreno_gpu->base);
+
+       icc_put(gpu->icc_path);
+       icc_put(gpu->ocmem_icc_path);
 }
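
For context on how the interconnect paths acquired above are typically consumed: votes are placed with icc_set_bw(), which accepts a NULL path, so the optional ocmem path needs no special-casing at the call site. A minimal sketch with purely illustrative bandwidth numbers (this helper is hypothetical, not taken from this patch):

#include <linux/interconnect.h>
#include "msm_gpu.h"

/* Hypothetical vote/unvote helper, for illustration only. */
static void adreno_icc_vote(struct msm_gpu *gpu, bool on)
{
	/*
	 * icc_set_bw() returns 0 for a NULL path, so both calls are safe
	 * even when only the legacy single "gfx-mem" path was found.
	 */
	icc_set_bw(gpu->icc_path, 0, on ? MBps_to_icc(7216) : 0);
	icc_set_bw(gpu->ocmem_icc_path, 0, on ? MBps_to_icc(1000) : 0);
}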
index 2f5d2c3..99bb468 100644
@@ -68,6 +68,13 @@ struct adreno_gpu_funcs {
        int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
 };
 
+struct adreno_reglist {
+       u32 offset;
+       u32 value;
+};
+
+extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];
+
 struct adreno_info {
        struct adreno_rev rev;
        uint32_t revn;
@@ -78,6 +85,7 @@ struct adreno_info {
        struct msm_gpu *(*init)(struct drm_device *dev);
        const char *zapfw;
        u32 inactive_period;
+       const struct adreno_reglist *hwcg;
 };
 
 const struct adreno_info *adreno_info(struct adreno_rev rev);
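
The new hwcg pointer gives each adreno_info entry a table of hardware clock-gating register writes. How the table is terminated and applied is not shown in this hunk; a plausible sketch, assuming a zero-offset entry acts as the end marker (the consumer function below is hypothetical):

/* Hypothetical consumer of an adreno_reglist table, for illustration only. */
static void apply_hwcg(struct msm_gpu *gpu, const struct adreno_reglist *regs)
{
	unsigned int i;

	if (!regs)
		return;

	/* Walk entries until a zero offset terminates the list. */
	for (i = 0; regs[i].offset; i++)
		gpu_write(gpu, regs[i].offset, regs[i].value);
}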
index 79b907a..3931eec 100644
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml                     (    594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml        (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml                (  90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml       (  14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml          (  65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml                (  84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml                ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml                ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml                ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml            (  11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml               (   1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml (   4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml    (   2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -54,10 +56,13 @@ enum vgt_event_type {
        CACHE_FLUSH_TS = 4,
        CONTEXT_DONE = 5,
        CACHE_FLUSH = 6,
-       HLSQ_FLUSH = 7,
        VIZQUERY_START = 7,
+       HLSQ_FLUSH = 7,
        VIZQUERY_END = 8,
        SC_WAIT_WC = 9,
+       WRITE_PRIMITIVE_COUNTS = 9,
+       START_PRIMITIVE_CTRS = 11,
+       STOP_PRIMITIVE_CTRS = 12,
        RST_PIX_CNT = 13,
        RST_VTX_CNT = 14,
        TILE_FLUSH = 15,
@@ -65,23 +70,31 @@ enum vgt_event_type {
        CACHE_FLUSH_AND_INV_TS_EVENT = 20,
        ZPASS_DONE = 21,
        CACHE_FLUSH_AND_INV_EVENT = 22,
+       RB_DONE_TS = 22,
        PERFCOUNTER_START = 23,
        PERFCOUNTER_STOP = 24,
        VS_FETCH_DONE = 27,
        FACENESS_FLUSH = 28,
+       WT_DONE_TS = 8,
        FLUSH_SO_0 = 17,
        FLUSH_SO_1 = 18,
        FLUSH_SO_2 = 19,
        FLUSH_SO_3 = 20,
        PC_CCU_INVALIDATE_DEPTH = 24,
        PC_CCU_INVALIDATE_COLOR = 25,
-       UNK_1C = 28,
-       UNK_1D = 29,
+       PC_CCU_RESOLVE_TS = 26,
+       PC_CCU_FLUSH_DEPTH_TS = 28,
+       PC_CCU_FLUSH_COLOR_TS = 29,
        BLIT = 30,
        UNK_25 = 37,
        LRZ_FLUSH = 38,
+       BLIT_OP_FILL_2D = 39,
+       BLIT_OP_COPY_2D = 40,
+       BLIT_OP_SCALE_2D = 42,
+       CONTEXT_DONE_2D = 43,
        UNK_2C = 44,
        UNK_2D = 45,
+       CACHE_INVALIDATE = 49,
 };
 
 enum pc_di_primtype {
@@ -99,13 +112,45 @@ enum pc_di_primtype {
        DI_PT_LINESTRIP_ADJ = 11,
        DI_PT_TRI_ADJ = 12,
        DI_PT_TRISTRIP_ADJ = 13,
+       DI_PT_PATCHES0 = 31,
+       DI_PT_PATCHES1 = 32,
+       DI_PT_PATCHES2 = 33,
+       DI_PT_PATCHES3 = 34,
+       DI_PT_PATCHES4 = 35,
+       DI_PT_PATCHES5 = 36,
+       DI_PT_PATCHES6 = 37,
+       DI_PT_PATCHES7 = 38,
+       DI_PT_PATCHES8 = 39,
+       DI_PT_PATCHES9 = 40,
+       DI_PT_PATCHES10 = 41,
+       DI_PT_PATCHES11 = 42,
+       DI_PT_PATCHES12 = 43,
+       DI_PT_PATCHES13 = 44,
+       DI_PT_PATCHES14 = 45,
+       DI_PT_PATCHES15 = 46,
+       DI_PT_PATCHES16 = 47,
+       DI_PT_PATCHES17 = 48,
+       DI_PT_PATCHES18 = 49,
+       DI_PT_PATCHES19 = 50,
+       DI_PT_PATCHES20 = 51,
+       DI_PT_PATCHES21 = 52,
+       DI_PT_PATCHES22 = 53,
+       DI_PT_PATCHES23 = 54,
+       DI_PT_PATCHES24 = 55,
+       DI_PT_PATCHES25 = 56,
+       DI_PT_PATCHES26 = 57,
+       DI_PT_PATCHES27 = 58,
+       DI_PT_PATCHES28 = 59,
+       DI_PT_PATCHES29 = 60,
+       DI_PT_PATCHES30 = 61,
+       DI_PT_PATCHES31 = 62,
 };
 
 enum pc_di_src_sel {
        DI_SRC_SEL_DMA = 0,
        DI_SRC_SEL_IMMEDIATE = 1,
        DI_SRC_SEL_AUTO_INDEX = 2,
-       DI_SRC_SEL_RESERVED = 3,
+       DI_SRC_SEL_AUTO_XFB = 3,
 };
 
 enum pc_di_face_cull_sel {
@@ -143,6 +188,7 @@ enum adreno_pm4_type3_packets {
        CP_PREEMPT_ENABLE = 28,
        CP_PREEMPT_TOKEN = 30,
        CP_INDIRECT_BUFFER = 63,
+       CP_INDIRECT_BUFFER_CHAIN = 87,
        CP_INDIRECT_BUFFER_PFD = 55,
        CP_WAIT_FOR_IDLE = 38,
        CP_WAIT_REG_MEM = 60,
@@ -199,6 +245,7 @@ enum adreno_pm4_type3_packets {
        CP_DRAW_INDX_OFFSET = 56,
        CP_DRAW_INDIRECT = 40,
        CP_DRAW_INDX_INDIRECT = 41,
+       CP_DRAW_INDIRECT_MULTI = 42,
        CP_DRAW_AUTO = 36,
        CP_UNKNOWN_19 = 25,
        CP_UNKNOWN_1A = 26,
@@ -232,6 +279,7 @@ enum adreno_pm4_type3_packets {
        CP_SET_MODE = 99,
        CP_LOAD_STATE6_GEOM = 50,
        CP_LOAD_STATE6_FRAG = 52,
+       CP_LOAD_STATE6 = 54,
        IN_IB_PREFETCH_END = 23,
        IN_SUBBLK_PREFETCH = 31,
        IN_INSTR_PREFETCH = 32,
@@ -241,9 +289,14 @@ enum adreno_pm4_type3_packets {
        IN_INCR_UPDT_CONST = 86,
        IN_INCR_UPDT_INSTR = 87,
        PKT4 = 4,
-       CP_UNK_A6XX_14 = 20,
-       CP_UNK_A6XX_36 = 54,
-       CP_UNK_A6XX_55 = 85,
+       CP_SCRATCH_WRITE = 76,
+       CP_REG_TO_MEM_OFFSET_MEM = 116,
+       CP_REG_TO_MEM_OFFSET_REG = 114,
+       CP_WAIT_MEM_GTE = 20,
+       CP_WAIT_TWO_REGS = 112,
+       CP_MEMCPY = 117,
+       CP_SET_BIN_DATA5_OFFSET = 46,
+       CP_SET_CTXSWITCH_IB = 85,
        CP_REG_WRITE = 109,
 };
 
@@ -292,6 +345,7 @@ enum a4xx_state_block {
 enum a4xx_state_type {
        ST4_SHADER = 0,
        ST4_CONSTANTS = 1,
+       ST4_UBO = 2,
 };
 
 enum a4xx_state_src {
@@ -312,18 +366,22 @@ enum a6xx_state_block {
        SB6_GS_SHADER = 11,
        SB6_FS_SHADER = 12,
        SB6_CS_SHADER = 13,
-       SB6_SSBO = 14,
-       SB6_CS_SSBO = 15,
+       SB6_IBO = 14,
+       SB6_CS_IBO = 15,
 };
 
 enum a6xx_state_type {
        ST6_SHADER = 0,
        ST6_CONSTANTS = 1,
+       ST6_UBO = 2,
+       ST6_IBO = 3,
 };
 
 enum a6xx_state_src {
        SS6_DIRECT = 0,
+       SS6_BINDLESS = 1,
        SS6_INDIRECT = 2,
+       SS6_UBO = 3,
 };
 
 enum a4xx_index_size {
@@ -332,6 +390,17 @@ enum a4xx_index_size {
        INDEX4_SIZE_32_BIT = 2,
 };
 
+enum a6xx_patch_type {
+       TESS_QUADS = 0,
+       TESS_TRIANGLES = 1,
+       TESS_ISOLINES = 2,
+};
+
+enum a6xx_draw_indirect_opcode {
+       INDIRECT_OP_NORMAL = 2,
+       INDIRECT_OP_INDEXED = 4,
+};
+
 enum cp_cond_function {
        WRITE_ALWAYS = 0,
        WRITE_LT = 1,
@@ -361,9 +430,15 @@ enum a6xx_render_mode {
        RM6_BYPASS = 1,
        RM6_BINNING = 2,
        RM6_GMEM = 4,
-       RM6_BLIT2D = 5,
+       RM6_ENDVIS = 5,
        RM6_RESOLVE = 6,
+       RM6_YIELD = 7,
+       RM6_COMPUTE = 8,
        RM6_BLIT2DSCALE = 12,
+       RM6_IB1LIST_START = 13,
+       RM6_IB1LIST_END = 14,
+       RM6_IFPC_ENABLE = 256,
+       RM6_IFPC_DISABLE = 257,
 };
 
 enum pseudo_reg {
@@ -374,6 +449,25 @@ enum pseudo_reg {
        COUNTER = 4,
 };
 
+enum compare_mode {
+       PRED_TEST = 1,
+       REG_COMPARE = 2,
+       RENDER_MODE = 3,
+};
+
+enum ctxswitch_ib {
+       RESTORE_IB = 0,
+       YIELD_RESTORE_IB = 1,
+       SAVE_IB = 2,
+       RB_SAVE_IB = 3,
+};
+
+enum reg_tracker {
+       TRACK_CNTL_REG = 1,
+       TRACK_RENDER_CNTL = 2,
+       UNK_EVENT_WRITE = 4,
+};
+
 #define REG_CP_LOAD_STATE_0                                    0x00000000
 #define CP_LOAD_STATE_0_DST_OFF__MASK                          0x0000ffff
 #define CP_LOAD_STATE_0_DST_OFF__SHIFT                         0
@@ -469,7 +563,7 @@ static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
 {
        return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
 }
-#define CP_LOAD_STATE6_0_STATE_TYPE__MASK                      0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK                      0x0000c000
 #define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT                     14
 static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
 {
@@ -510,6 +604,8 @@ static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
        return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
 }
 
+#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR                                0x00000001
+
 #define REG_CP_DRAW_INDX_0                                     0x00000000
 #define CP_DRAW_INDX_0_VIZ_QUERY__MASK                         0xffffffff
 #define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT                                0
@@ -653,12 +749,14 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val
 {
        return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
 }
-#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK                  0x01f00000
-#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT                 20
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_TESS_MODE(uint32_t val)
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK                 0x00003000
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT                        12
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val)
 {
-       return ((val) << CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT) & CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK;
+       return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK;
 }
+#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE                                0x00010000
+#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE                      0x00020000
 
 #define REG_CP_DRAW_INDX_OFFSET_1                              0x00000001
 #define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK              0xffffffff
@@ -677,6 +775,39 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
 }
 
 #define REG_CP_DRAW_INDX_OFFSET_3                              0x00000003
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK                 0xffffffff
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT                        0
+static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
+{
+       return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
+}
+
+
+#define REG_CP_DRAW_INDX_OFFSET_4                              0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK               0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT              0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
+{
+       return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5                              0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK               0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT              0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
+{
+       return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE                      0x00000004
+
+#define REG_CP_DRAW_INDX_OFFSET_6                              0x00000006
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK                        0xffffffff
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT               0
+static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
+{
+       return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
+}
 
 #define REG_CP_DRAW_INDX_OFFSET_4                              0x00000004
 #define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK                  0xffffffff
@@ -719,12 +850,15 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size v
 {
        return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
 }
-#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK                        0x01f00000
-#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT               20
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK               0x00003000
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT              12
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
 {
-       return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+       return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK;
 }
+#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE                      0x00010000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE                    0x00020000
+
 
 #define REG_A4XX_CP_DRAW_INDIRECT_1                            0x00000001
 #define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK                 0xffffffff
@@ -735,6 +869,14 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
 }
 
 
+#define REG_A5XX_CP_DRAW_INDIRECT_1                            0x00000001
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK              0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT             0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val)
+{
+       return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK;
+}
+
 #define REG_A5XX_CP_DRAW_INDIRECT_2                            0x00000002
 #define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK              0xffffffff
 #define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT             0
@@ -743,6 +885,8 @@ static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
        return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
 }
 
+#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT                     0x00000001
+
 #define REG_A4XX_CP_DRAW_INDX_INDIRECT_0                       0x00000000
 #define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK           0x0000003f
 #define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT          0
@@ -768,12 +912,14 @@ static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_s
 {
        return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
 }
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK           0x01f00000
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT          20
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK          0x00003000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT         12
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
 {
-       return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+       return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK;
 }
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE                 0x00010000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE               0x00020000
 
 
 #define REG_A4XX_CP_DRAW_INDX_INDIRECT_1                       0x00000001
@@ -817,6 +963,8 @@ static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
        return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
 }
 
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE               0x00000001
+
 #define REG_A5XX_CP_DRAW_INDX_INDIRECT_3                       0x00000003
 #define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK         0xffffffff
 #define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT                0
@@ -841,6 +989,84 @@ static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
        return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
 }
 
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT                        0x00000004
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0                      0x00000000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK          0x0000003f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT         0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK      0x000000c0
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT     6
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK           0x00000300
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT          8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK         0x00000c00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT                10
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK         0x00003000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT                12
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE                        0x00010000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE              0x00020000
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1                      0x00000001
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK             0x0000000f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT            0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK            0x003fff00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT           8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_2                      0x00000002
+#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK         0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT                0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT(uint32_t val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_ADDRESS_0              0x00000003
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_5                      0x00000005
+#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK            0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT           0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0(uint32_t val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT               0x00000006
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_8                      0x00000008
+#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK             0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT            0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE(uint32_t val)
+{
+       return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK;
+}
+
 static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
 
 static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -854,12 +1080,9 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
 #define CP_SET_DRAW_STATE__0_DISABLE                           0x00020000
 #define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS                        0x00040000
 #define CP_SET_DRAW_STATE__0_LOAD_IMMED                                0x00080000
-#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK                 0x00f00000
-#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT                        20
-static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
-{
-       return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
-}
+#define CP_SET_DRAW_STATE__0_BINNING                           0x00100000
+#define CP_SET_DRAW_STATE__0_GMEM                              0x00200000
+#define CP_SET_DRAW_STATE__0_SYSMEM                            0x00400000
 #define CP_SET_DRAW_STATE__0_GROUP_ID__MASK                    0x1f000000
 #define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT                   24
 static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -976,30 +1199,101 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
 }
 
 #define REG_CP_SET_BIN_DATA5_5                                 0x00000005
-#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK             0xffffffff
-#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT            0
-static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK              0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT             0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val)
 {
-       return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
+       return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK;
 }
 
 #define REG_CP_SET_BIN_DATA5_6                                 0x00000006
-#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK             0xffffffff
-#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT            0
-static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK              0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT             0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_0                          0x00000000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK               0x003f0000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT              16
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK                  0x07c00000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT                 22
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_1                          0x00000001
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK                0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT       0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_2                          0x00000002
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK                0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT       0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_3                          0x00000003
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK       0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT      0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val)
+{
+       return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK;
+}
+
+#define REG_CP_REG_RMW_0                                       0x00000000
+#define CP_REG_RMW_0_DST_REG__MASK                             0x0003ffff
+#define CP_REG_RMW_0_DST_REG__SHIFT                            0
+static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val)
+{
+       return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK;
+}
+#define CP_REG_RMW_0_ROTATE__MASK                              0x1f000000
+#define CP_REG_RMW_0_ROTATE__SHIFT                             24
+static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val)
+{
+       return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK;
+}
+#define CP_REG_RMW_0_SRC1_ADD                                  0x20000000
+#define CP_REG_RMW_0_SRC1_IS_REG                               0x40000000
+#define CP_REG_RMW_0_SRC0_IS_REG                               0x80000000
+
+#define REG_CP_REG_RMW_1                                       0x00000001
+#define CP_REG_RMW_1_SRC0__MASK                                        0xffffffff
+#define CP_REG_RMW_1_SRC0__SHIFT                               0
+static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val)
+{
+       return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK;
+}
+
+#define REG_CP_REG_RMW_2                                       0x00000002
+#define CP_REG_RMW_2_SRC1__MASK                                        0xffffffff
+#define CP_REG_RMW_2_SRC1__SHIFT                               0
+static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val)
 {
-       return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
+       return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK;
 }
 
 #define REG_CP_REG_TO_MEM_0                                    0x00000000
-#define CP_REG_TO_MEM_0_REG__MASK                              0x0000ffff
+#define CP_REG_TO_MEM_0_REG__MASK                              0x0003ffff
 #define CP_REG_TO_MEM_0_REG__SHIFT                             0
 static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
 {
        return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
 }
-#define CP_REG_TO_MEM_0_CNT__MASK                              0x3ff80000
-#define CP_REG_TO_MEM_0_CNT__SHIFT                             19
+#define CP_REG_TO_MEM_0_CNT__MASK                              0x3ffc0000
+#define CP_REG_TO_MEM_0_CNT__SHIFT                             18
 static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
 {
        return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
@@ -1023,8 +1317,97 @@ static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
        return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
 }
 
+#define REG_CP_REG_TO_MEM_OFFSET_REG_0                         0x00000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK                   0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT                  0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK                   0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT                  18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_64B                         0x40000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE                  0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_1                         0x00000001
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK                  0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT                 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_2                         0x00000002
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK               0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT              0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_3                         0x00000003
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK               0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT              0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH             0x00080000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_0                         0x00000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK                   0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT                  0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK                   0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT                  18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_64B                         0x40000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE                  0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_1                         0x00000001
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK                  0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT                 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_2                         0x00000002
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK               0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT              0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_3                         0x00000003
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK             0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT            0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_4                         0x00000004
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK             0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT            0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val)
+{
+       return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK;
+}
+
 #define REG_CP_MEM_TO_REG_0                                    0x00000000
-#define CP_MEM_TO_REG_0_REG__MASK                              0x0000ffff
+#define CP_MEM_TO_REG_0_REG__MASK                              0x0003ffff
 #define CP_MEM_TO_REG_0_REG__SHIFT                             0
 static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
 {
@@ -1036,8 +1419,8 @@ static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
 {
        return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
 }
-#define CP_MEM_TO_REG_0_64B                                    0x40000000
-#define CP_MEM_TO_REG_0_ACCUMULATE                             0x80000000
+#define CP_MEM_TO_REG_0_SHIFT_BY_2                             0x40000000
+#define CP_MEM_TO_REG_0_UNK31                                  0x80000000
 
 #define REG_CP_MEM_TO_REG_1                                    0x00000001
 #define CP_MEM_TO_REG_1_SRC__MASK                              0xffffffff
@@ -1060,6 +1443,113 @@ static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
 #define CP_MEM_TO_MEM_0_NEG_B                                  0x00000002
 #define CP_MEM_TO_MEM_0_NEG_C                                  0x00000004
 #define CP_MEM_TO_MEM_0_DOUBLE                                 0x20000000
+#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES                    0x40000000
+#define CP_MEM_TO_MEM_0_UNK31                                  0x80000000
+
+#define REG_CP_MEMCPY_0                                                0x00000000
+#define CP_MEMCPY_0_DWORDS__MASK                               0xffffffff
+#define CP_MEMCPY_0_DWORDS__SHIFT                              0
+static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val)
+{
+       return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK;
+}
+
+#define REG_CP_MEMCPY_1                                                0x00000001
+#define CP_MEMCPY_1_SRC_LO__MASK                               0xffffffff
+#define CP_MEMCPY_1_SRC_LO__SHIFT                              0
+static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val)
+{
+       return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_2                                                0x00000002
+#define CP_MEMCPY_2_SRC_HI__MASK                               0xffffffff
+#define CP_MEMCPY_2_SRC_HI__SHIFT                              0
+static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val)
+{
+       return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEMCPY_3                                                0x00000003
+#define CP_MEMCPY_3_DST_LO__MASK                               0xffffffff
+#define CP_MEMCPY_3_DST_LO__SHIFT                              0
+static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val)
+{
+       return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_4                                                0x00000004
+#define CP_MEMCPY_4_DST_HI__MASK                               0xffffffff
+#define CP_MEMCPY_4_DST_HI__SHIFT                              0
+static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val)
+{
+       return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_SCRATCH_0                                        0x00000000
+#define CP_REG_TO_SCRATCH_0_REG__MASK                          0x0003ffff
+#define CP_REG_TO_SCRATCH_0_REG__SHIFT                         0
+static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val)
+{
+       return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK                      0x00700000
+#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT                     20
+static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val)
+{
+       return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_CNT__MASK                          0x07000000
+#define CP_REG_TO_SCRATCH_0_CNT__SHIFT                         24
+static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val)
+{
+       return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_TO_REG_0                                        0x00000000
+#define CP_SCRATCH_TO_REG_0_REG__MASK                          0x0003ffff
+#define CP_SCRATCH_TO_REG_0_REG__SHIFT                         0
+static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val)
+{
+       return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_UNK18                              0x00040000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK                      0x00700000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT                     20
+static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val)
+{
+       return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_CNT__MASK                          0x07000000
+#define CP_SCRATCH_TO_REG_0_CNT__SHIFT                         24
+static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val)
+{
+       return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_WRITE_0                                 0x00000000
+#define CP_SCRATCH_WRITE_0_SCRATCH__MASK                       0x00700000
+#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT                      20
+static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val)
+{
+       return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK;
+}
+
+#define REG_CP_MEM_WRITE_0                                     0x00000000
+#define CP_MEM_WRITE_0_ADDR_LO__MASK                           0xffffffff
+#define CP_MEM_WRITE_0_ADDR_LO__SHIFT                          0
+static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val)
+{
+       return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_MEM_WRITE_1                                     0x00000001
+#define CP_MEM_WRITE_1_ADDR_HI__MASK                           0xffffffff
+#define CP_MEM_WRITE_1_ADDR_HI__SHIFT                          0
+static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val)
+{
+       return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK;
+}
 
 #define REG_CP_COND_WRITE_0                                    0x00000000
 #define CP_COND_WRITE_0_FUNCTION__MASK                         0x00000007
@@ -1118,7 +1608,9 @@ static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
 {
        return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
 }
+#define CP_COND_WRITE5_0_SIGNED_COMPARE                                0x00000008
 #define CP_COND_WRITE5_0_POLL_MEMORY                           0x00000010
+#define CP_COND_WRITE5_0_POLL_SCRATCH                          0x00000020
 #define CP_COND_WRITE5_0_WRITE_MEMORY                          0x00000100
 
 #define REG_CP_COND_WRITE5_1                                   0x00000001
@@ -1177,6 +1669,114 @@ static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
        return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
 }
 
+#define REG_CP_WAIT_MEM_GTE_0                                  0x00000000
+#define CP_WAIT_MEM_GTE_0_RESERVED__MASK                       0xffffffff
+#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT                      0
+static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val)
+{
+       return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_1                                  0x00000001
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK                   0xffffffff
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT                  0
+static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val)
+{
+       return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_2                                  0x00000002
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK                   0xffffffff
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT                  0
+static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val)
+{
+       return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_3                                  0x00000003
+#define CP_WAIT_MEM_GTE_3_REF__MASK                            0xffffffff
+#define CP_WAIT_MEM_GTE_3_REF__SHIFT                           0
+static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val)
+{
+       return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_0                                  0x00000000
+#define CP_WAIT_REG_MEM_0_FUNCTION__MASK                       0x00000007
+#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT                      0
+static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
+{
+       return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
+}
+#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE                       0x00000008
+#define CP_WAIT_REG_MEM_0_POLL_MEMORY                          0x00000010
+#define CP_WAIT_REG_MEM_0_POLL_SCRATCH                         0x00000020
+#define CP_WAIT_REG_MEM_0_WRITE_MEMORY                         0x00000100
+
+#define REG_CP_WAIT_REG_MEM_1                                  0x00000001
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK                   0xffffffff
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT                  0
+static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val)
+{
+       return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_2                                  0x00000002
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK                   0xffffffff
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT                  0
+static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val)
+{
+       return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_3                                  0x00000003
+#define CP_WAIT_REG_MEM_3_REF__MASK                            0xffffffff
+#define CP_WAIT_REG_MEM_3_REF__SHIFT                           0
+static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val)
+{
+       return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_4                                  0x00000004
+#define CP_WAIT_REG_MEM_4_MASK__MASK                           0xffffffff
+#define CP_WAIT_REG_MEM_4_MASK__SHIFT                          0
+static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val)
+{
+       return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_5                                  0x00000005
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK              0xffffffff
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT             0
+static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val)
+{
+       return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_0                                 0x00000000
+#define CP_WAIT_TWO_REGS_0_REG0__MASK                          0x0003ffff
+#define CP_WAIT_TWO_REGS_0_REG0__SHIFT                         0
+static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val)
+{
+       return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_1                                 0x00000001
+#define CP_WAIT_TWO_REGS_1_REG1__MASK                          0x0003ffff
+#define CP_WAIT_TWO_REGS_1_REG1__SHIFT                         0
+static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val)
+{
+       return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_2                                 0x00000002
+#define CP_WAIT_TWO_REGS_2_REF__MASK                           0xffffffff
+#define CP_WAIT_TWO_REGS_2_REF__SHIFT                          0
+static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val)
+{
+       return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK;
+}
+
 #define REG_CP_DISPATCH_COMPUTE_0                              0x00000000
 
 #define REG_CP_DISPATCH_COMPUTE_1                              0x00000001
@@ -1329,6 +1929,7 @@ static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
        return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
 }
 #define CP_EVENT_WRITE_0_TIMESTAMP                             0x40000000
+#define CP_EVENT_WRITE_0_IRQ                                   0x80000000
 
 #define REG_CP_EVENT_WRITE_1                                   0x00000001
 #define CP_EVENT_WRITE_1_ADDR_0_LO__MASK                       0xffffffff
@@ -1506,61 +2107,209 @@ static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
        return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
 }
 
-#define REG_A2XX_CP_SET_MARKER_0                               0x00000000
-#define A2XX_CP_SET_MARKER_0_MARKER__MASK                      0x0000000f
-#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT                     0
-static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+#define REG_A6XX_CP_SET_MARKER_0                               0x00000000
+#define A6XX_CP_SET_MARKER_0_MODE__MASK                                0x000001ff
+#define A6XX_CP_SET_MARKER_0_MODE__SHIFT                       0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+       return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A6XX_CP_SET_MARKER_0_MARKER__MASK                      0x0000000f
+#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT                     0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_render_mode val)
+{
+       return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK             0x00000007
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT            0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+       return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK                     0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT                    0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+       return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK                     0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT                    0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+       return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A6XX_CP_REG_TEST_0                                 0x00000000
+#define A6XX_CP_REG_TEST_0_REG__MASK                           0x0003ffff
+#define A6XX_CP_REG_TEST_0_REG__SHIFT                          0
+static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+       return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A6XX_CP_REG_TEST_0_BIT__MASK                           0x01f00000
+#define A6XX_CP_REG_TEST_0_BIT__SHIFT                          20
+static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+       return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A6XX_CP_REG_TEST_0_WAIT_FOR_ME                         0x02000000
+
+#define REG_CP_COND_REG_EXEC_0                                 0x00000000
+#define CP_COND_REG_EXEC_0_REG0__MASK                          0x0003ffff
+#define CP_COND_REG_EXEC_0_REG0__SHIFT                         0
+static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
+{
+       return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
+}
+#define CP_COND_REG_EXEC_0_BINNING                             0x02000000
+#define CP_COND_REG_EXEC_0_GMEM                                        0x04000000
+#define CP_COND_REG_EXEC_0_SYSMEM                              0x08000000
+#define CP_COND_REG_EXEC_0_MODE__MASK                          0xf0000000
+#define CP_COND_REG_EXEC_0_MODE__SHIFT                         28
+static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
+{
+       return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
+}
+
+#define REG_CP_COND_REG_EXEC_1                                 0x00000001
+#define CP_COND_REG_EXEC_1_DWORDS__MASK                                0xffffffff
+#define CP_COND_REG_EXEC_1_DWORDS__SHIFT                       0
+static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+       return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_CP_COND_EXEC_0                                     0x00000000
+#define CP_COND_EXEC_0_ADDR0_LO__MASK                          0xffffffff
+#define CP_COND_EXEC_0_ADDR0_LO__SHIFT                         0
+static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val)
+{
+       return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_1                                     0x00000001
+#define CP_COND_EXEC_1_ADDR0_HI__MASK                          0xffffffff
+#define CP_COND_EXEC_1_ADDR0_HI__SHIFT                         0
+static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val)
 {
-       return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+       return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK;
 }
-#define A2XX_CP_SET_MARKER_0_MODE__MASK                                0x0000000f
-#define A2XX_CP_SET_MARKER_0_MODE__SHIFT                       0
-static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+
+#define REG_CP_COND_EXEC_2                                     0x00000002
+#define CP_COND_EXEC_2_ADDR1_LO__MASK                          0xffffffff
+#define CP_COND_EXEC_2_ADDR1_LO__SHIFT                         0
+static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val)
+{
+       return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_3                                     0x00000003
+#define CP_COND_EXEC_3_ADDR1_HI__MASK                          0xffffffff
+#define CP_COND_EXEC_3_ADDR1_HI__SHIFT                         0
+static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val)
 {
-       return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+       return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK;
 }
-#define A2XX_CP_SET_MARKER_0_IFPC                              0x00000100
 
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define REG_CP_COND_EXEC_4                                     0x00000004
+#define CP_COND_EXEC_4_REF__MASK                               0xffffffff
+#define CP_COND_EXEC_4_REF__SHIFT                              0
+static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val)
+{
+       return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK;
+}
 
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK             0x00000007
-#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT            0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+#define REG_CP_COND_EXEC_5                                     0x00000005
+#define CP_COND_EXEC_5_DWORDS__MASK                            0xffffffff
+#define CP_COND_EXEC_5_DWORDS__SHIFT                           0
+static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val)
 {
-       return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+       return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK;
 }
 
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK                     0xffffffff
-#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT                    0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_0                              0x00000000
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK                    0xffffffff
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT                   0
+static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val)
 {
-       return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+       return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK;
 }
 
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK                     0xffffffff
-#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT                    0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_1                              0x00000001
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK                    0xffffffff
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT                   0
+static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val)
 {
-       return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+       return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK;
 }
 
-#define REG_A2XX_CP_REG_TEST_0                                 0x00000000
-#define A2XX_CP_REG_TEST_0_REG__MASK                           0x00000fff
-#define A2XX_CP_REG_TEST_0_REG__SHIFT                          0
-static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_2                              0x00000002
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK                     0x000fffff
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT                    0
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val)
+{
+       return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK;
+}
+#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK                       0x00300000
+#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT                      20
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
 {
-       return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+       return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK;
 }
-#define A2XX_CP_REG_TEST_0_BIT__MASK                           0x01f00000
-#define A2XX_CP_REG_TEST_0_BIT__SHIFT                          20
-static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+
+#define REG_CP_REG_WRITE_0                                     0x00000000
+#define CP_REG_WRITE_0_TRACKER__MASK                           0x00000007
+#define CP_REG_WRITE_0_TRACKER__SHIFT                          0
+static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
+{
+       return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_0                             0x00000000
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK                  0xffffffff
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT                 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val)
+{
+       return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_1                             0x00000001
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK                  0x0000ffff
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT                 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val)
+{
+       return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK;
+}
+#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK                      0xffff0000
+#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT                     16
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val)
+{
+       return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_2                             0x00000002
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK                        0xffffffff
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT               0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val)
+{
+       return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_3                             0x00000003
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK               0xffffffff
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT              0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
 {
-       return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+       return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
 }
-#define A2XX_CP_REG_TEST_0_UNK25                               0x02000000
 
 
 #endif /* ADRENO_PM4_XML */
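The block above is generated PM4 packet-header output, so each REG_CP_*_N constant is simply the dword index inside the packet payload and each helper masks/shifts a field into that dword. As a rough sketch of how the new CP_WAIT_REG_MEM fields would be consumed (not part of this patch), assuming the usual msm OUT_PKT7()/OUT_RING() ring helpers and that WRITE_EQ is one of the enum cp_cond_function comparison values:

/* Sketch: emit a CP_WAIT_REG_MEM packet with the helpers above, stalling
 * the CP until *poll_addr == ref.  OUT_PKT7()/OUT_RING() and WRITE_EQ are
 * assumed from the msm adreno ring code, not defined in this patch. */
static void emit_wait_mem_eq(struct msm_ringbuffer *ring,
			     uint64_t poll_addr, uint32_t ref)
{
	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
		       CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(lower_32_bits(poll_addr)));
	OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(upper_32_bits(poll_addr)));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(ref));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0xffffffff));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
}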
index 7c230f7..b36919d 100644
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/pm_opp.h>
 #include <linux/sort.h>
 #include <linux/clk.h>
 #include <linux/bitmap.h>
@@ -218,7 +219,7 @@ static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
                rate = core_clk->max_rate;
 
        core_clk->rate = rate;
-       return msm_dss_clk_set_rate(core_clk, 1);
+       return dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
 }
 
 static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
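Here the core clock request moves from msm_dss_clk_set_rate() to dev_pm_opp_set_rate(), so the rate goes through the OPP table that dpu_bind() registers further down in this patch. A condensed sketch of that pattern, using a hypothetical helper name and trimmed error unwinding (the real code spreads this across dpu_bind()/dpu_unbind()):

#include <linux/pm_opp.h>

/* Hypothetical helper condensing the optional-OPP handling added by this
 * patch; "core" is the clock the DPU scales through the OPP framework. */
static struct opp_table *dpu_opp_init_sketch(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(opp_table))
		return opp_table;

	ret = dev_pm_opp_of_add_table(dev);	/* DT opp-table is optional */
	if (ret && ret != -ENODEV) {
		dev_pm_opp_put_clkname(opp_table);
		return ERR_PTR(ret);
	}

	return opp_table;
}

/* ...after which the perf path above only has to request a rate, e.g.
 *	dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
 */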
index e15b42a..f272a8d 100644
@@ -389,14 +389,14 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
        spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
 
        if (!fevent) {
-               DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+               DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
                return;
        }
 
        fevent->event = event;
        fevent->crtc = crtc;
        fevent->ts = ktime_get();
-       kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+       kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
 }
 
 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
index 797e8fd..a97f6d2 100644
@@ -208,6 +208,36 @@ struct dpu_encoder_virt {
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
 
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+       15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+       struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+       if (!hw_pp->ops.setup_dither)
+               return;
+
+       switch (bpc) {
+       case 6:
+               dither_cfg.c0_bitdepth = 6;
+               dither_cfg.c1_bitdepth = 6;
+               dither_cfg.c2_bitdepth = 6;
+               dither_cfg.c3_bitdepth = 6;
+               dither_cfg.temporal_en = 0;
+               break;
+       default:
+               hw_pp->ops.setup_dither(hw_pp, NULL);
+               return;
+       }
+
+       memcpy(&dither_cfg.matrix, dither_matrix,
+                       sizeof(u32) * DITHER_MATRIX_SZ);
+
+       hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
+
 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
                enum dpu_intr_idx intr_idx)
 {
@@ -504,7 +534,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
                        struct dpu_kms *dpu_kms,
                        struct drm_display_mode *mode)
 {
-       struct msm_display_topology topology;
+       struct msm_display_topology topology = {0};
        int i, intf_count = 0;
 
        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -520,7 +550,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
         * 1 LM, 1 INTF
         * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
         *
-        * Adding color blocks only to primary interface
+        * Add color blocks (DSPPs) only to the primary interface, and only
+        * if the catalog provides enough of them
         */
        if (intf_count == 2)
                topology.num_lm = 2;
@@ -529,8 +560,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
        else
                topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
 
-       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
-               topology.num_dspp = topology.num_lm;
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+               if (dpu_kms->catalog->dspp &&
+                       (dpu_kms->catalog->dspp_count >= topology.num_lm))
+                       topology.num_dspp = topology.num_lm;
+       }
 
        topology.num_enc = 0;
        topology.num_intf = intf_count;
@@ -1054,7 +1088,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
        struct dpu_encoder_virt *dpu_enc = NULL;
        struct msm_drm_private *priv;
-       struct dpu_kms *dpu_kms;
+       int i;
 
        if (!drm_enc || !drm_enc->dev) {
                DPU_ERROR("invalid parameters\n");
@@ -1062,7 +1096,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
        }
 
        priv = drm_enc->dev->dev_private;
-       dpu_kms = to_dpu_kms(priv->kms);
 
        dpu_enc = to_dpu_encoder_virt(drm_enc);
        if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1070,13 +1103,17 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
                return;
        }
 
-       if (dpu_enc->cur_master->hw_mdptop &&
-                       dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
-               dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
-                               dpu_enc->cur_master->hw_mdptop,
-                               dpu_kms->catalog);
-
        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+                       !WARN_ON(dpu_enc->num_phys_encs == 0)) {
+               unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
+               for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+                       if (!dpu_enc->hw_pp[i])
+                               continue;
+                       _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+               }
+       }
 }
 
 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
@@ -2109,7 +2146,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
 
        dpu_enc = to_dpu_encoder_virt(enc);
 
-       mutex_init(&dpu_enc->enc_lock);
        ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
        if (ret)
                goto fail;
@@ -2124,7 +2160,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
                                0);
 
 
-       mutex_init(&dpu_enc->rc_lock);
        INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
                        dpu_encoder_off_work);
        dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2156,7 +2191,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
        if (!dpu_enc)
-               return ERR_PTR(ENOMEM);
+               return ERR_PTR(-ENOMEM);
 
        rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
                        drm_enc_mode, NULL);
@@ -2169,6 +2204,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        spin_lock_init(&dpu_enc->enc_spinlock);
        dpu_enc->enabled = false;
+       mutex_init(&dpu_enc->enc_lock);
+       mutex_init(&dpu_enc->rc_lock);
 
        return &dpu_enc->base;
 }
index 29d4fde..97d122e 100644
 
 #define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
 
+#define INTF_SDM845_MASK (0)
+
+#define INTF_SC7180_MASK (BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE))
+
 #define DEFAULT_PIXEL_RAM_SIZE         (50 * 1024)
 #define DEFAULT_DPU_LINE_WIDTH         2048
 #define DEFAULT_DPU_OUTPUT_LINE_WIDTH  2560
@@ -70,6 +74,10 @@ static const struct dpu_caps sdm845_dpu_caps = {
        .has_dim_layer = true,
        .has_idle_pc = true,
        .has_3d_merge = true,
+       .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+       .max_hdeci_exp = MAX_HORZ_DECIMATION,
+       .max_vdeci_exp = MAX_VERT_DECIMATION,
 };
 
 static const struct dpu_caps sc7180_dpu_caps = {
@@ -80,6 +88,39 @@ static const struct dpu_caps sc7180_dpu_caps = {
        .ubwc_version = DPU_HW_UBWC_VER_20,
        .has_dim_layer = true,
        .has_idle_pc = true,
+       .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sm8150_dpu_caps = {
+       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_blendstages = 0xb,
+       .qseed_type = DPU_SSPP_SCALER_QSEED3,
+       .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+       .ubwc_version = DPU_HW_UBWC_VER_30,
+       .has_src_split = true,
+       .has_dim_layer = true,
+       .has_idle_pc = true,
+       .has_3d_merge = true,
+       .max_linewidth = 4096,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+       .max_hdeci_exp = MAX_HORZ_DECIMATION,
+       .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps sm8250_dpu_caps = {
+       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_blendstages = 0xb,
+       .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+       .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+       .ubwc_version = DPU_HW_UBWC_VER_40,
+       .has_src_split = true,
+       .has_dim_layer = true,
+       .has_idle_pc = true,
+       .has_3d_merge = true,
+       .max_linewidth = 4096,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
 };
 
 static const struct dpu_mdp_cfg sdm845_mdp[] = {
@@ -117,10 +158,37 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
                .reg_off = 0x2AC, .bit_off = 0},
        .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
                .reg_off = 0x2AC, .bit_off = 8},
-       .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
                .reg_off = 0x2B4, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+               .reg_off = 0x2C4, .bit_off = 8},
+       },
+};
+
+static const struct dpu_mdp_cfg sm8250_mdp[] = {
+       {
+       .name = "top_0", .id = MDP_TOP,
+       .base = 0x0, .len = 0x45C,
+       .features = 0,
+       .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+       .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+                       .reg_off = 0x2AC, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+                       .reg_off = 0x2B4, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+                       .reg_off = 0x2BC, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+                       .reg_off = 0x2C4, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+                       .reg_off = 0x2AC, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+                       .reg_off = 0x2B4, .bit_off = 8},
        .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
-               .reg_off = 0x2BC, .bit_off = 8},
+                       .reg_off = 0x2BC, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+                       .reg_off = 0x2C4, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+                       .reg_off = 0x2BC, .bit_off = 20},
        },
 };
 
@@ -173,21 +241,47 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
        },
 };
 
+static const struct dpu_ctl_cfg sm8150_ctl[] = {
+       {
+       .name = "ctl_0", .id = CTL_0,
+       .base = 0x1000, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+       },
+       {
+       .name = "ctl_1", .id = CTL_1,
+       .base = 0x1200, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+       },
+       {
+       .name = "ctl_2", .id = CTL_2,
+       .base = 0x1400, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG)
+       },
+       {
+       .name = "ctl_3", .id = CTL_3,
+       .base = 0x1600, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG)
+       },
+       {
+       .name = "ctl_4", .id = CTL_4,
+       .base = 0x1800, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG)
+       },
+       {
+       .name = "ctl_5", .id = CTL_5,
+       .base = 0x1a00, .len = 0x1e0,
+       .features = BIT(DPU_CTL_ACTIVE_CFG)
+       },
+};
+
 /*************************************************************
  * SSPP sub blocks config
  *************************************************************/
 
 /* SSPP common configuration */
-static const struct dpu_sspp_blks_common sdm845_sspp_common = {
-       .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
-       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
-       .maxhdeciexp = MAX_HORZ_DECIMATION,
-       .maxvdeciexp = MAX_VERT_DECIMATION,
-};
 
 #define _VIG_SBLK(num, sdma_pri, qseed_ver) \
        { \
-       .common = &sdm845_sspp_common, \
        .maxdwnscale = MAX_DOWNSCALE_RATIO, \
        .maxupscale = MAX_UPSCALE_RATIO, \
        .smart_dma_priority = sdma_pri, \
@@ -207,7 +301,6 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
 
 #define _DMA_SBLK(num, sdma_pri) \
        { \
-       .common = &sdm845_sspp_common, \
        .maxdwnscale = SSPP_UNITY_SCALE, \
        .maxupscale = SSPP_UNITY_SCALE, \
        .smart_dma_priority = sdma_pri, \
@@ -272,10 +365,10 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
                sc7180_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
        SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
                sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
-       SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_SDM845_MASK,
-               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+       SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_CURSOR_SDM845_MASK,
+               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
        SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
-               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
 };
 
 /*************************************************************
@@ -336,6 +429,23 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
                &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
 };
 
+/* SM8150 */
+
+static const struct dpu_lm_cfg sm8150_lm[] = {
+       LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
+       LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
+       LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
+       LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
+       LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
+       LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+               &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+};
+
 /*************************************************************
  * DSPP sub blocks config
  *************************************************************/
@@ -355,6 +465,7 @@ static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
 static const struct dpu_dspp_cfg sc7180_dspp[] = {
        DSPP_BLK("dspp_0", DSPP_0, 0x54000),
 };
+
 /*************************************************************
  * PINGPONG sub blocks config
  *************************************************************/
@@ -397,29 +508,45 @@ static struct dpu_pingpong_cfg sc7180_pp[] = {
        PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
 };
 
+static const struct dpu_pingpong_cfg sm8150_pp[] = {
+       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+       PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+       PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+       PP_BLK("pingpong_4", PINGPONG_4, 0x72000),
+       PP_BLK("pingpong_5", PINGPONG_5, 0x72800),
+};
+
 /*************************************************************
  * INTF sub blocks config
  *************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
        {\
        .name = _name, .id = _id, \
        .base = _base, .len = 0x280, \
-       .features = BIT(DPU_CTL_ACTIVE_CFG), \
+       .features = _features, \
        .type = _type, \
        .controller_id = _ctrl_id, \
        .prog_fetch_lines_worst_case = 24 \
        }
 
 static const struct dpu_intf_cfg sdm845_intf[] = {
-       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
-       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
-       INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
-       INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
+       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
+       INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
+       INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
 };
 
 static const struct dpu_intf_cfg sc7180_intf[] = {
-       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
-       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
+       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+};
+
+static const struct dpu_intf_cfg sm8150_intf[] = {
+       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
+       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+       INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
+       INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
 };
 
 /*************************************************************
@@ -452,6 +579,18 @@ static const struct dpu_reg_dma_cfg sdm845_regdma = {
        .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
 };
 
+static const struct dpu_reg_dma_cfg sm8150_regdma = {
+       .base = 0x0, .version = 0x00010001, .trigger_sel_off = 0x119c
+};
+
+static const struct dpu_reg_dma_cfg sm8250_regdma = {
+       .base = 0x0,
+       .version = 0x00010002,
+       .trigger_sel_off = 0x119c,
+       .xin_id = 7,
+       .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
 /*************************************************************
  * PERF data config
  *************************************************************/
@@ -476,6 +615,10 @@ static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
        {.fl = 0, .lut = 0x0011222222335777},
 };
 
+static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
+       {.fl = 0, .lut = 0x0011222222223357 },
+};
+
 static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
        {.fl = 10, .lut = 0x344556677},
        {.fl = 11, .lut = 0x3344556677},
@@ -560,6 +703,56 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
        },
 };
 
+static const struct dpu_perf_cfg sm8150_perf_data = {
+       .max_bw_low = 12800000,
+       .max_bw_high = 12800000,
+       .min_core_ib = 2400000,
+       .min_llcc_ib = 800000,
+       .min_dram_ib = 800000,
+       .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .qos_lut_tbl = {
+               {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+               .entries = sm8150_qos_linear
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+               .entries = sc7180_qos_macrotile
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+               .entries = sc7180_qos_nrt
+               },
+               /* TODO: macrotile-qseed is different from macrotile */
+       },
+       .cdp_cfg = {
+               {.rd_enable = 1, .wr_enable = 1},
+               {.rd_enable = 1, .wr_enable = 0}
+       },
+};
+
+static const struct dpu_perf_cfg sm8250_perf_data = {
+       .max_bw_low = 13700000,
+       .max_bw_high = 16600000,
+       .min_core_ib = 4800000,
+       .min_llcc_ib = 0,
+       .min_dram_ib = 800000,
+       .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .qos_lut_tbl = {
+               {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+               .entries = sc7180_qos_linear
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+               .entries = sc7180_qos_macrotile
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+               .entries = sc7180_qos_nrt
+               },
+               /* TODO: macrotile-qseed is different from macrotile */
+       },
+       .cdp_cfg = {
+               {.rd_enable = 1, .wr_enable = 1},
+               {.rd_enable = 1, .wr_enable = 0}
+       },
+};
+
 /*************************************************************
  * Hardware catalog init
  *************************************************************/
@@ -624,9 +817,71 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
        };
 }
 
+/*
+ * sm8150_cfg_init(): populate sm8150 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+       *dpu_cfg = (struct dpu_mdss_cfg){
+               .caps = &sm8150_dpu_caps,
+               .mdp_count = ARRAY_SIZE(sdm845_mdp),
+               .mdp = sdm845_mdp,
+               .ctl_count = ARRAY_SIZE(sm8150_ctl),
+               .ctl = sm8150_ctl,
+               .sspp_count = ARRAY_SIZE(sdm845_sspp),
+               .sspp = sdm845_sspp,
+               .mixer_count = ARRAY_SIZE(sm8150_lm),
+               .mixer = sm8150_lm,
+               .pingpong_count = ARRAY_SIZE(sm8150_pp),
+               .pingpong = sm8150_pp,
+               .intf_count = ARRAY_SIZE(sm8150_intf),
+               .intf = sm8150_intf,
+               .vbif_count = ARRAY_SIZE(sdm845_vbif),
+               .vbif = sdm845_vbif,
+               .reg_dma_count = 1,
+               .dma_cfg = sm8150_regdma,
+               .perf = sm8150_perf_data,
+               .mdss_irqs = 0x3ff,
+       };
+}
+
+/*
+ * sm8250_cfg_init(): populate sm8250 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+       *dpu_cfg = (struct dpu_mdss_cfg){
+               .caps = &sm8250_dpu_caps,
+               .mdp_count = ARRAY_SIZE(sm8250_mdp),
+               .mdp = sm8250_mdp,
+               .ctl_count = ARRAY_SIZE(sm8150_ctl),
+               .ctl = sm8150_ctl,
+               /* TODO: sspp qseed version differs from 845 */
+               .sspp_count = ARRAY_SIZE(sdm845_sspp),
+               .sspp = sdm845_sspp,
+               .mixer_count = ARRAY_SIZE(sm8150_lm),
+               .mixer = sm8150_lm,
+               .pingpong_count = ARRAY_SIZE(sm8150_pp),
+               .pingpong = sm8150_pp,
+               .intf_count = ARRAY_SIZE(sm8150_intf),
+               .intf = sm8150_intf,
+               .vbif_count = ARRAY_SIZE(sdm845_vbif),
+               .vbif = sdm845_vbif,
+               .reg_dma_count = 1,
+               .dma_cfg = sm8250_regdma,
+               .perf = sm8250_perf_data,
+               .mdss_irqs = 0xff,
+       };
+}
+
 static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
        { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
        { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+       { .hw_rev = DPU_HW_VER_500, .cfg_init = sm8150_cfg_init},
+       { .hw_rev = DPU_HW_VER_501, .cfg_init = sm8150_cfg_init},
+       { .hw_rev = DPU_HW_VER_600, .cfg_init = sm8250_cfg_init},
        { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
 };
 
index f7de438..1b7a921 100644
@@ -37,7 +37,9 @@
 #define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
 #define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
 #define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
-#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sm8150 v1.0 */
+#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
+#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
 #define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
 
 
@@ -65,10 +67,9 @@ enum {
        DPU_HW_UBWC_VER_10 = 0x100,
        DPU_HW_UBWC_VER_20 = 0x200,
        DPU_HW_UBWC_VER_30 = 0x300,
+       DPU_HW_UBWC_VER_40 = 0x400,
 };
 
-#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= DPU_HW_UBWC_VER_20)
-
 /**
  * MDP TOP BLOCK features
 * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
@@ -186,6 +187,19 @@ enum {
 };
 
 /**
+ * INTF sub-blocks
+ * @DPU_INTF_INPUT_CTRL         Supports the setting of pp block from which
+ *                              pixel data arrives at this INTF
+ * @DPU_INTF_TE                 INTF block has TE configuration support
+ * @DPU_INTF_MAX
+ */
+enum {
+       DPU_INTF_INPUT_CTRL = 0x1,
+       DPU_INTF_TE,
+       DPU_INTF_MAX
+};
+
+/**
  * VBIF sub-blocks and features
  * @DPU_VBIF_QOS_OTLIM        VBIF supports OT Limit
  * @DPU_VBIF_QOS_REMAP        VBIF supports QoS priority remap
@@ -300,6 +314,10 @@ struct dpu_qos_lut_tbl {
  * @has_dim_layer      dim layer feature status
  * @has_idle_pc        indicate if idle power collapse feature is supported
  * @has_3d_merge       indicate if 3D merge is supported
+ * @max_linewidth      max linewidth for sspp
+ * @pixel_ram_size     size of latency hiding and de-tiling buffer in bytes
+ * @max_hdeci_exp      max horizontal decimation supported (max is 2^value)
+ * @max_vdeci_exp      max vertical decimation supported (max is 2^value)
  */
 struct dpu_caps {
        u32 max_mixer_width;
@@ -311,22 +329,11 @@ struct dpu_caps {
        bool has_dim_layer;
        bool has_idle_pc;
        bool has_3d_merge;
-};
-
-/**
- * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
- * @maxwidth: max pixelwidth supported by this pipe
- * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
- * @maxhdeciexp: max horizontal decimation supported by this pipe
- *                             (max is 2^value)
- * @maxvdeciexp: max vertical decimation supported by this pipe
- *                             (max is 2^value)
- */
-struct dpu_sspp_blks_common {
-       u32 maxlinewidth;
+       /* SSPP limits */
+       u32 max_linewidth;
        u32 pixel_ram_size;
-       u32 maxhdeciexp;
-       u32 maxvdeciexp;
+       u32 max_hdeci_exp;
+       u32 max_vdeci_exp;
 };
 
 /**
@@ -352,7 +359,6 @@ struct dpu_sspp_blks_common {
  * @virt_num_formats: Number of supported formats for virtual planes
  */
 struct dpu_sspp_sub_blks {
-       const struct dpu_sspp_blks_common *common;
        u32 creq_vblank;
        u32 danger_vblank;
        u32 maxdwnscale;
@@ -423,6 +429,7 @@ enum dpu_clk_ctrl_type {
        DPU_CLK_CTRL_CURSOR0,
        DPU_CLK_CTRL_CURSOR1,
        DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+       DPU_CLK_CTRL_REG_DMA,
        DPU_CLK_CTRL_MAX,
 };
 
@@ -447,7 +454,6 @@ struct dpu_clk_ctrl_reg {
 struct dpu_mdp_cfg {
        DPU_HW_BLK_INFO;
        u32 highest_bank_bit;
-       u32 ubwc_static;
        u32 ubwc_swizzle;
        struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
 };
@@ -607,6 +613,8 @@ struct dpu_reg_dma_cfg {
        DPU_HW_BLK_INFO;
        u32 version;
        u32 trigger_sel_off;
+       u32 xin_id;
+       enum dpu_clk_ctrl_type clk_ctrl;
 };
 
 /**
index 613ae8f..758c355 100644
@@ -245,30 +245,14 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
 static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
                u32 *flushbits, enum dpu_intf intf)
 {
-       switch (intf) {
-       case INTF_0:
-       case INTF_1:
-               *flushbits |= BIT(31);
-               break;
-       default:
-               return 0;
-       }
+       *flushbits |= BIT(31);
        return 0;
 }
 
 static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
                u32 *flushbits, enum dpu_intf intf)
 {
-       switch (intf) {
-       case INTF_0:
-               *flushbits |= BIT(0);
-               break;
-       case INTF_1:
-               *flushbits |= BIT(1);
-               break;
-       default:
-               return 0;
-       }
+       *flushbits |= BIT(intf - INTF_0);
        return 0;
 }
 
index efe9a57..6f0f545 100644
@@ -107,11 +107,6 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
        display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
        p->hsync_skew - 1;
 
-       if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
-               display_v_start += p->hsync_pulse_width + p->h_back_porch;
-               display_v_end -= p->h_front_porch;
-       }
-
        hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
        hsync_end_x = hsync_period - p->h_front_porch - 1;
 
@@ -144,10 +139,25 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
        hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
        display_hctl = (hsync_end_x << 16) | hsync_start_x;
 
+       if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+               active_h_start = hsync_start_x;
+               active_h_end = active_h_start + p->xres - 1;
+               active_v_start = display_v_start;
+               active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+
+               display_v_start += p->hsync_pulse_width + p->h_back_porch;
+
+               active_hctl = (active_h_end << 16) | active_h_start;
+               display_hctl = active_hctl;
+       }
+
        den_polarity = 0;
        if (ctx->cap->type == INTF_HDMI) {
                hsync_polarity = p->yres >= 720 ? 0 : 1;
                vsync_polarity = p->yres >= 720 ? 0 : 1;
+       } else if (ctx->cap->type == INTF_DP) {
+               hsync_polarity = p->hsync_polarity;
+               vsync_polarity = p->vsync_polarity;
        } else {
                hsync_polarity = 0;
                vsync_polarity = 0;
@@ -225,14 +235,9 @@ static void dpu_hw_intf_bind_pingpong_blk(
                bool enable,
                const enum dpu_pingpong pp)
 {
-       struct dpu_hw_blk_reg_map *c;
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
        u32 mux_cfg;
 
-       if (!intf)
-               return;
-
-       c = &intf->hw;
-
        mux_cfg = DPU_REG_READ(c, INTF_MUX);
        mux_cfg &= ~0xf;
 
@@ -280,7 +285,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
        ops->get_status = dpu_hw_intf_get_status;
        ops->enable_timing = dpu_hw_intf_enable_timing_engine;
        ops->get_line_count = dpu_hw_intf_get_line_count;
-       if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+       if (cap & BIT(DPU_INTF_INPUT_CTRL))
                ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
 }
 
index 37becd4..4b8baf7 100644
@@ -152,14 +152,13 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
                unsigned long features)
 {
        ops->setup_mixer_out = dpu_hw_lm_setup_out;
-       if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
-           || IS_SC7180_TARGET(m->hwversion))
+       if (m->hwversion >= DPU_HW_VER_400)
                ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
        else
                ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
        ops->setup_alpha_out = dpu_hw_lm_setup_color3;
        ops->setup_border_color = dpu_hw_lm_setup_border_color;
-};
+}
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
 
index 402dc58..979fd2c 100644
@@ -171,6 +171,7 @@ enum dpu_ctl {
        CTL_2,
        CTL_3,
        CTL_4,
+       CTL_5,
        CTL_MAX
 };
 
@@ -180,6 +181,7 @@ enum dpu_pingpong {
        PINGPONG_2,
        PINGPONG_3,
        PINGPONG_4,
+       PINGPONG_5,
        PINGPONG_S0,
        PINGPONG_MAX
 };
index d110a40..bea4ab5 100644
 #define PP_FBC_BUDGET_CTL               0x038
 #define PP_FBC_LOSSY_MODE               0x03C
 
+#define PP_DITHER_EN                   0x000
+#define PP_DITHER_BITDEPTH             0x004
+#define PP_DITHER_MATRIX               0x008
+
+#define DITHER_DEPTH_MAP_INDEX 9
+
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+       0, 0, 0, 0, 0, 0, 0, 1, 2
+};
+
 static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
                const struct dpu_mdss_cfg *m,
                void __iomem *addr,
@@ -49,6 +59,37 @@ static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
        return ERR_PTR(-EINVAL);
 }
 
+static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
+                                   struct dpu_hw_dither_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 i, base, data = 0;
+
+       c = &pp->hw;
+       base = pp->caps->sblk->dither.base;
+       if (!cfg) {
+               DPU_REG_WRITE(c, base + PP_DITHER_EN, 0);
+               return;
+       }
+
+       data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2);
+       data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2;
+       data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4;
+       data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6;
+       data |= (cfg->temporal_en) ? (1 << 8) : 0;
+
+       DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data);
+
+       for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+               data = (cfg->matrix[i] & REG_MASK(4)) |
+                       ((cfg->matrix[i + 1] & REG_MASK(4)) << 4) |
+                       ((cfg->matrix[i + 2] & REG_MASK(4)) << 8) |
+                       ((cfg->matrix[i + 3] & REG_MASK(4)) << 12);
+               DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data);
+       }
+       DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
+}
+
 static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
                struct dpu_hw_tear_check *te)
 {
@@ -180,15 +221,18 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
        return line;
 }
 
-static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
-       const struct dpu_pingpong_cfg *hw_cap)
+static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
+                               unsigned long features)
 {
-       ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
-       ops->enable_tearcheck = dpu_hw_pp_enable_te;
-       ops->connect_external_te = dpu_hw_pp_connect_external_te;
-       ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
-       ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
-       ops->get_line_count = dpu_hw_pp_get_line_count;
+       c->ops.setup_tearcheck = dpu_hw_pp_setup_te_config;
+       c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
+       c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
+       c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+       c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+       c->ops.get_line_count = dpu_hw_pp_get_line_count;
+
+       if (test_bit(DPU_PINGPONG_DITHER, &features))
+               c->ops.setup_dither = dpu_hw_pp_setup_dither;
 };
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
@@ -212,7 +256,7 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 
        c->idx = idx;
        c->caps = cfg;
-       _setup_pingpong_ops(&c->ops, c->caps);
+       _setup_pingpong_ops(c, c->caps->features);
 
        dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
 
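The setup_dither() hook added above packs four 4-bit matrix entries per dword, least-significant nibble first, and maps component bit depths through dither_depth_map into 2-bit fields. A standalone sketch of just the matrix packing, equivalent to the PP_DITHER_MATRIX write loop (no MMIO, for illustration only):

#include <stdint.h>

/* Standalone illustration of the packing done by dpu_hw_pp_setup_dither():
 * matrix entry i lands in dword i/4, in bits 4*(i%4) .. 4*(i%4)+3. */
static void pack_dither_matrix(const uint32_t matrix[16], uint32_t dwords[4])
{
	for (int i = 0; i < 4; i++)
		dwords[i] = 0;
	for (int i = 0; i < 16; i++)
		dwords[i / 4] |= (matrix[i] & 0xf) << (4 * (i % 4));
}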
index d73cb73..065996b 100644
@@ -10,6 +10,8 @@
 #include "dpu_hw_util.h"
 #include "dpu_hw_blk.h"
 
+#define DITHER_MATRIX_SZ 16
+
 struct dpu_hw_pingpong;
 
 struct dpu_hw_tear_check {
@@ -35,6 +37,26 @@ struct dpu_hw_pp_vsync_info {
 };
 
 /**
+ * struct dpu_hw_dither_cfg - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct dpu_hw_dither_cfg {
+       u64 flags;
+       u32 temporal_en;
+       u32 c0_bitdepth;
+       u32 c1_bitdepth;
+       u32 c2_bitdepth;
+       u32 c3_bitdepth;
+       u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
  *
  * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
  *  Assumption is these functions will be called after clocks are enabled
@@ -82,6 +104,12 @@ struct dpu_hw_pingpong_ops {
         * Obtain current vertical line counter
         */
        u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+
+       /**
+        * Setup dither matrix for pingpong block
+        */
+       void (*setup_dither)(struct dpu_hw_pingpong *pp,
+                       struct dpu_hw_dither_cfg *cfg);
 };
 
 struct dpu_hw_pingpong {
index 82c5dbf..c940b69 100644
@@ -303,11 +303,25 @@ static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
                DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
                        DPU_FETCH_CONFIG_RESET_VALUE |
                        ctx->mdp->highest_bank_bit << 18);
-               if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+               switch (ctx->catalog->caps->ubwc_version) {
+               case DPU_HW_UBWC_VER_10:
+                       /* TODO: UBWC v1 case */
+                       break;
+               case DPU_HW_UBWC_VER_20:
                        fast_clear = fmt->alpha_enable ? BIT(31) : 0;
                        DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
                                        fast_clear | (ctx->mdp->ubwc_swizzle) |
                                        (ctx->mdp->highest_bank_bit << 4));
+                       break;
+               case DPU_HW_UBWC_VER_30:
+                       DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+                                       BIT(30) | (ctx->mdp->ubwc_swizzle) |
+                                       (ctx->mdp->highest_bank_bit << 4));
+                       break;
+               case DPU_HW_UBWC_VER_40:
+                       DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+                                       DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+                       break;
                }
        }
 
index f9af52a..01b7676 100644
@@ -8,7 +8,6 @@
 #include "dpu_kms.h"
 
 #define SSPP_SPARE                        0x28
-#define UBWC_STATIC                       0x144
 
 #define FLD_SPLIT_DISPLAY_CMD             BIT(1)
 #define FLD_SMART_PANEL_FREE_RUN          BIT(2)
@@ -249,22 +248,6 @@ static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
        status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
 }
 
-static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
-{
-       struct dpu_hw_blk_reg_map c;
-
-       if (!mdp || !m)
-               return;
-
-       if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
-               return;
-
-       /* force blk offset to zero to access beginning of register region */
-       c = mdp->hw;
-       c.blk_off = 0x0;
-       DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
-}
-
 static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
 {
        struct dpu_hw_blk_reg_map *c;
@@ -285,7 +268,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
        ops->get_danger_status = dpu_hw_get_danger_status;
        ops->setup_vsync_source = dpu_hw_setup_vsync_source;
        ops->get_safe_status = dpu_hw_get_safe_status;
-       ops->reset_ubwc = dpu_hw_reset_ubwc;
        ops->intf_audio_select = dpu_hw_intf_audio_select;
 }
 
index 1d9d32e..8018fff 100644
@@ -127,13 +127,6 @@ struct dpu_hw_mdp_ops {
                        struct dpu_danger_safe_status *status);
 
        /**
-        * reset_ubwc - reset top level UBWC configuration
-        * @mdp: mdp top context driver
-        * @m: pointer to mdss catalog data
-        */
-       void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
-
-       /**
         * intf_audio_select - select the external interface for audio
         * @mdp: mdp top context driver
         */
index b8615d4..c0a4d4e 100644
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/of_irq.h>
+#include <linux/pm_opp.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_file.h>
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
 
-static unsigned long dpu_iomap_size(struct platform_device *pdev,
-                                   const char *name)
-{
-       struct resource *res;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-       if (!res) {
-               DRM_ERROR("failed to get memory resource: %s\n", name);
-               return 0;
-       }
-
-       return resource_size(res);
-}
-
 #ifdef CONFIG_DEBUG_FS
 static int _dpu_danger_signal_status(struct seq_file *s,
                bool danger_status)
@@ -780,7 +767,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 
        mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
        aspace = msm_gem_address_space_create(mmu, "dpu1",
-               0x1000, 0xfffffff);
+               0x1000, 0x100000000 - 0x1000);
 
        if (IS_ERR(aspace)) {
                mmu->funcs->destroy(mmu);
@@ -844,7 +831,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                goto error;
        }
        DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
-       dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
 
        dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
        if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
@@ -853,22 +839,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                dpu_kms->vbif[VBIF_RT] = NULL;
                goto error;
        }
-       dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
-       dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+       dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
        if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
                dpu_kms->vbif[VBIF_NRT] = NULL;
                DPU_DEBUG("VBIF NRT is not defined");
-       } else {
-               dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
-                                                            "vbif_nrt");
        }
 
-       dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+       dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma");
        if (IS_ERR(dpu_kms->reg_dma)) {
                dpu_kms->reg_dma = NULL;
                DPU_DEBUG("REG_DMA is not defined");
-       } else {
-               dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
        }
 
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -1025,11 +1005,24 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
        if (!dpu_kms)
                return -ENOMEM;
 
+       dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
+       if (IS_ERR(dpu_kms->opp_table))
+               return PTR_ERR(dpu_kms->opp_table);
+       /* OPP table is optional */
+       ret = dev_pm_opp_of_add_table(dev);
+       if (!ret) {
+               dpu_kms->has_opp_table = true;
+       } else if (ret != -ENODEV) {
+               dev_err(dev, "invalid OPP table in device tree\n");
+               dev_pm_opp_put_clkname(dpu_kms->opp_table);
+               return ret;
+       }
+
        mp = &dpu_kms->mp;
        ret = msm_dss_parse_clock(pdev, mp);
        if (ret) {
                DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
-               return ret;
+               goto err;
        }
 
        platform_set_drvdata(pdev, dpu_kms);
@@ -1043,6 +1036,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
 
        priv->kms = &dpu_kms->base;
        return ret;
+err:
+       if (dpu_kms->has_opp_table)
+               dev_pm_opp_of_remove_table(dev);
+       dev_pm_opp_put_clkname(dpu_kms->opp_table);
+       return ret;
 }
 
 static void dpu_unbind(struct device *dev, struct device *master, void *data)
@@ -1057,6 +1055,10 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
 
        if (dpu_kms->rpm_enabled)
                pm_runtime_disable(&pdev->dev);
+
+       if (dpu_kms->has_opp_table)
+               dev_pm_opp_of_remove_table(dev);
+       dev_pm_opp_put_clkname(dpu_kms->opp_table);
 }
 
 static const struct component_ops dpu_ops = {
@@ -1082,6 +1084,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct dss_module_power *mp = &dpu_kms->mp;
 
+       /* Drop the performance state vote */
+       dev_pm_opp_set_rate(dev, 0);
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
        if (rc)
                DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -1115,6 +1119,8 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 
 static const struct dev_pm_ops dpu_pm_ops = {
        SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static const struct of_device_id dpu_dt_match[] = {
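The dpu_bind()/dpu_unbind() hunks above follow the usual optional-OPP-table pattern: bind the OPP library to the clock named "core", try to populate the table from devicetree, and treat -ENODEV (no table present) as harmless while any other error unwinds the clkname binding. Below is a minimal sketch of that pattern using only the dev_pm_opp_* calls visible in the diff; the my_* names are illustrative, not part of the driver.

/*
 * Sketch of the optional-OPP-table setup/teardown pattern; the struct
 * and function names here are placeholders.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

struct my_priv {
	struct opp_table *opp_table;
	bool has_opp_table;
};

static int my_bind_opp(struct device *dev, struct my_priv *priv)
{
	int ret;

	/* Tie the OPP framework to the clock named "core" in DT */
	priv->opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(priv->opp_table))
		return PTR_ERR(priv->opp_table);

	/* The operating-points table itself is optional */
	ret = dev_pm_opp_of_add_table(dev);
	if (!ret) {
		priv->has_opp_table = true;
	} else if (ret != -ENODEV) {
		/* A table exists but is malformed: undo and fail */
		dev_pm_opp_put_clkname(priv->opp_table);
		return ret;
	}
	return 0;
}

static void my_unbind_opp(struct device *dev, struct my_priv *priv)
{
	if (priv->has_opp_table)
		dev_pm_opp_of_remove_table(dev);
	dev_pm_opp_put_clkname(priv->opp_table);
}

The same setup/teardown pairing shows up again in the msm_dsi_host.c hunks later in this diff, just with the "byte" clock instead of "core".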
index a3b122b..e140cd6 100644 (file)
@@ -100,7 +100,6 @@ struct dpu_kms {
 
        /* io/register spaces: */
        void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
-       unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
 
        struct regulator *vdd;
        struct regulator *mmagic;
@@ -128,6 +127,10 @@ struct dpu_kms {
 
        struct platform_device *pdev;
        bool rpm_enabled;
+
+       struct opp_table *opp_table;
+       bool has_opp_table;
+
        struct dss_module_power mp;
 
        /* reference count bandwidth requests, so we know when we can
index 80d3cfc..7d3fdbb 100644 (file)
 #define HW_REV                         0x0
 #define HW_INTR_STATUS                 0x0010
 
+#define UBWC_STATIC                    0x144
+#define UBWC_CTRL_2                    0x150
+#define UBWC_PREDICTION_MODE           0x154
+
 /* Max BW defined in KBps */
 #define MAX_BW                         6800000
 
@@ -23,65 +27,15 @@ struct dpu_irq_controller {
        struct irq_domain *domain;
 };
 
-struct dpu_hw_cfg {
-       u32 val;
-       u32 offset;
-};
-
-struct dpu_mdss_hw_init_handler {
-       u32 hw_rev;
-       u32 hw_reg_count;
-       struct dpu_hw_cfg* hw_cfg;
-};
-
 struct dpu_mdss {
        struct msm_mdss base;
        void __iomem *mmio;
-       unsigned long mmio_len;
        struct dss_module_power mp;
        struct dpu_irq_controller irq_controller;
        struct icc_path *path[2];
        u32 num_paths;
 };
 
-static struct dpu_hw_cfg hw_cfg[] = {
-    {
-       /* UBWC global settings */
-       .val = 0x1E,
-       .offset = 0x144,
-    }
-};
-
-static struct dpu_mdss_hw_init_handler cfg_handler[] = {
-    { .hw_rev = DPU_HW_VER_620,
-      .hw_reg_count = ARRAY_SIZE(hw_cfg),
-      .hw_cfg = hw_cfg
-    },
-};
-
-static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
-{
-       int i;
-       u32 count = 0;
-       struct dpu_hw_cfg *hw_cfg = NULL;
-
-       for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
-               if (cfg_handler[i].hw_rev == hw_rev) {
-                       hw_cfg = cfg_handler[i].hw_cfg;
-                       count = cfg_handler[i].hw_reg_count;
-                       break;
-           }
-       }
-
-       for (i = 0; i < count; i++ ) {
-               writel_relaxed(hw_cfg->val,
-                       dpu_mdss->mmio + hw_cfg->offset);
-               hw_cfg++;
-       }
-
-    return;
-}
-
 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
                                                struct dpu_mdss *dpu_mdss)
 {
@@ -224,7 +178,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
        struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
        struct dss_module_power *mp = &dpu_mdss->mp;
        int ret;
-       u32 mdss_rev;
 
        dpu_mdss_icc_request_bw(mdss);
 
@@ -234,8 +187,25 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
                return ret;
        }
 
-       mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
-       dpu_mdss_hw_init(dpu_mdss, mdss_rev);
+       /*
+        * UBWC config is part of the "mdss" register region, which is not
+        * accessible from the rest of the driver, so hardcode the known
+        * configurations here.
+        */
+       switch (readl_relaxed(dpu_mdss->mmio + HW_REV)) {
+       case DPU_HW_VER_500:
+       case DPU_HW_VER_501:
+               writel_relaxed(0x420, dpu_mdss->mmio + UBWC_STATIC);
+               break;
+       case DPU_HW_VER_600:
+               /* TODO: 0x102e for LP_DDR4 */
+               writel_relaxed(0x103e, dpu_mdss->mmio + UBWC_STATIC);
+               writel_relaxed(2, dpu_mdss->mmio + UBWC_CTRL_2);
+               writel_relaxed(1, dpu_mdss->mmio + UBWC_PREDICTION_MODE);
+               break;
+       case DPU_HW_VER_620:
+               writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
+               break;
+       }
 
        return ret;
 }
@@ -292,7 +262,6 @@ int dpu_mdss_init(struct drm_device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev->dev);
        struct msm_drm_private *priv = dev->dev_private;
-       struct resource *res;
        struct dpu_mdss *dpu_mdss;
        struct dss_module_power *mp;
        int ret = 0;
@@ -308,13 +277,6 @@ int dpu_mdss_init(struct drm_device *dev)
 
        DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
-       if (!res) {
-               DRM_ERROR("failed to get memory resource for mdss\n");
-               return -ENOMEM;
-       }
-       dpu_mdss->mmio_len = resource_size(res);
-
        ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
        if (ret)
                return ret;
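The dpu_mdss_enable() change above drops the table-driven dpu_mdss_hw_init() in favour of a direct switch on the value read back from HW_REV, programming the UBWC registers only for revisions the driver knows about and leaving anything else untouched. A rough sketch of that revision-keyed setup follows; the revision id, offset and value below are placeholders, not the real UBWC settings.

/*
 * Illustrative revision-keyed static register setup; constants are
 * made up for the example.
 */
#include <linux/io.h>
#include <linux/types.h>

#define HW_REV_REG	0x0

static void apply_static_cfg(void __iomem *mmio)
{
	u32 rev = readl_relaxed(mmio + HW_REV_REG);

	switch (rev) {
	case 0x50000000:	/* hypothetical revision id */
		writel_relaxed(0x1e, mmio + 0x144);
		break;
	default:
		/* unknown revision: keep the reset defaults */
		break;
	}
}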
index 3b9c33e..33f6c56 100644 (file)
@@ -153,7 +153,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
 
        pdpu = to_dpu_plane(plane);
        pstate = to_dpu_plane_state(plane->state);
-       fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+       fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
 
        list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
                if (!tmp->base.state->visible)
@@ -709,7 +709,7 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
                 * So we cannot support more than half of the supported SSPP
                 * width for tiled formats.
                 */
-               width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+               width_threshold = dpu_plane[i]->catalog->caps->max_linewidth;
                if (has_tiled_rect)
                        width_threshold /= 2;
 
@@ -887,7 +887,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
        fb_rect.x2 = state->fb->width;
        fb_rect.y2 = state->fb->height;
 
-       max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+       max_linewidth = pdpu->catalog->caps->max_linewidth;
 
        fmt = to_dpu_format(msm_framebuffer_format(state->fb));
 
index 4b36b89..f1d1de5 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 19291d7..dbf8d42 100644 (file)
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                        config->iommu);
 
                aspace  = msm_gem_address_space_create(mmu,
-                       "mdp4", 0x1000, 0xffffffff);
+                       "mdp4", 0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
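The msm_gem_address_space_create() caller here (and in the DPU and MDP5 hunks elsewhere in this diff) now passes 0x100000000 - 0x1000 as the final argument. Assuming that argument is a size in bytes rather than an end address, the mapping region runs from 0x1000 up to 0xffffffff inclusive, i.e. the whole 32-bit IOVA space minus the first page; the old DPU value of 0xfffffff, read as an end address, would have covered only the first 256 MiB. A quick stand-alone check of that arithmetic:

/* Sanity check of the IOVA range implied by start=0x1000, size=4G-4K */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x1000;
	uint64_t size  = 0x100000000ULL - 0x1000;

	/* prints va: [0x1000, 0xffffffff] */
	printf("va: [%#llx, %#llx]\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size - 1));
	return 0;
}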
index 784d989..4cf0953 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 25a13a2..df10c1a 100644 (file)
@@ -910,6 +910,202 @@ static const struct mdp5_cfg_hw msm8998_config = {
        .max_clk = 412500000,
 };
 
+static const struct mdp5_cfg_hw sdm630_config = {
+       .name = "sdm630",
+       .mdp = {
+               .count = 1,
+               .caps = MDP_CAP_CDM |
+                       MDP_CAP_SRC_SPLIT |
+                       0,
+       },
+       .ctl = {
+               .count = 5,
+               .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+               .flush_hw_mask = 0xf4ffffff,
+       },
+       .pipe_vig = {
+               .count = 1,
+               .base = { 0x04000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SCALE      |
+                       MDP_PIPE_CAP_CSC        |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_rgb = {
+               .count = 4,
+               .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SCALE      |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_dma = {
+               .count = 2, /* driver supports max of 2 currently */
+               .base = { 0x24000, 0x26000, 0x28000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_cursor = {
+               .count = 1,
+               .base = { 0x34000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       MDP_PIPE_CAP_CURSOR     |
+                       0,
+       },
+
+       .lm = {
+               .count = 2,
+               .base = { 0x44000, 0x46000 },
+               .instances = {
+                               { .id = 0, .pp = 0, .dspp = 0,
+                                 .caps = MDP_LM_CAP_DISPLAY |
+                                         MDP_LM_CAP_PAIR, },
+                               { .id = 1, .pp = 1, .dspp = -1,
+                                 .caps = MDP_LM_CAP_WB, },
+                               },
+               .nb_stages = 8,
+               .max_width = 2048,
+               .max_height = 0xFFFF,
+       },
+       .dspp = {
+               .count = 1,
+               .base = { 0x54000 },
+       },
+       .ad = {
+               .count = 2,
+               .base = { 0x78000, 0x78800 },
+       },
+       .pp = {
+               .count = 3,
+               .base = { 0x70000, 0x71000, 0x72000 },
+       },
+       .cdm = {
+               .count = 1,
+               .base = { 0x79200 },
+       },
+       .intf = {
+               .base = { 0x6a000, 0x6a800 },
+               .connect = {
+                       [0] = INTF_DISABLED,
+                       [1] = INTF_DSI,
+               },
+       },
+       .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm660_config = {
+       .name = "sdm660",
+       .mdp = {
+               .count = 1,
+               .caps = MDP_CAP_DSC |
+                       MDP_CAP_CDM |
+                       MDP_CAP_SRC_SPLIT |
+                       0,
+       },
+       .ctl = {
+               .count = 5,
+               .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+               .flush_hw_mask = 0xf4ffffff,
+       },
+       .pipe_vig = {
+               .count = 2,
+               .base = { 0x04000, 0x6000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SCALE      |
+                       MDP_PIPE_CAP_CSC        |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_rgb = {
+               .count = 4,
+               .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SCALE      |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_dma = {
+               .count = 2, /* driver supports max of 2 currently */
+               .base = { 0x24000, 0x26000, 0x28000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_cursor = {
+               .count = 1,
+               .base = { 0x34000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       MDP_PIPE_CAP_CURSOR     |
+                       0,
+       },
+
+       .lm = {
+               .count = 4,
+               .base = { 0x44000, 0x45000, 0x46000, 0x49000 },
+               .instances = {
+                               { .id = 0, .pp = 0, .dspp = 0,
+                                 .caps = MDP_LM_CAP_DISPLAY |
+                                         MDP_LM_CAP_PAIR, },
+                               { .id = 1, .pp = 1, .dspp = 1,
+                                 .caps = MDP_LM_CAP_DISPLAY, },
+                               { .id = 2, .pp = 2, .dspp = -1,
+                                 .caps = MDP_LM_CAP_DISPLAY |
+                                         MDP_LM_CAP_PAIR, },
+                               { .id = 3, .pp = 3, .dspp = -1,
+                                 .caps = MDP_LM_CAP_WB, },
+                               },
+               .nb_stages = 8,
+               .max_width = 2560,
+               .max_height = 0xFFFF,
+       },
+       .dspp = {
+               .count = 2,
+               .base = { 0x54000, 0x56000 },
+       },
+       .ad = {
+               .count = 2,
+               .base = { 0x78000, 0x78800 },
+       },
+       .pp = {
+               .count = 5,
+               .base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 },
+       },
+       .cdm = {
+               .count = 1,
+               .base = { 0x79200 },
+       },
+       .dsc = {
+               .count = 2,
+               .base = { 0x80000, 0x80400 },
+       },
+       .intf = {
+               .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 },
+               .connect = {
+                       [0] = INTF_DISABLED,
+                       [1] = INTF_DSI,
+                       [2] = INTF_DSI,
+                       [3] = INTF_HDMI,
+               },
+       },
+       .max_clk = 412500000,
+};
+
 static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
        { .revision = 0, .config = { .hw = &msm8x74v1_config } },
        { .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -924,6 +1120,8 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
 
 static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
        { .revision = 0, .config = { .hw = &msm8998_config } },
+       { .revision = 2, .config = { .hw = &sdm660_config } },
+       { .revision = 3, .config = { .hw = &sdm630_config } },
 };
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
index 19ec486..e193865 100644 (file)
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
 
                aspace = msm_gem_address_space_create(mmu, "mdp5",
-                       0x1000, 0xffffffff);
+                       0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
index d420c80..4f51bea 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 55ea4bc..6270488 100644 (file)
@@ -161,6 +161,8 @@ static const struct of_device_id dt_match[] = {
 
 static const struct dev_pm_ops dsi_pm_ops = {
        SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver dsi_driver = {
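Both dsi_pm_ops here and dpu_pm_ops earlier gain SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume), so system suspend/resume simply reuses the driver's runtime-PM callbacks. A minimal sketch of a dev_pm_ops wired up that way; the my_* callbacks are illustrative stubs.

/*
 * Runtime-PM callbacks reused for system sleep via the force helpers.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* clock/bandwidth teardown would go here */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* clock/bandwidth bring-up would go here */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

This avoids duplicating the suspend logic: on system sleep the core forces the device into its runtime-suspended state and restores it on resume.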
index 21f489a..8e536e0 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -148,7 +148,31 @@ static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
 #define DSI_STATUS0_INTERLEAVE_OP_CONTENTION                   0x80000000
 
 #define REG_DSI_FIFO_STATUS                                    0x00000008
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW                        0x00000001
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW               0x00000008
 #define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW                 0x00000080
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH                0x00000100
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH                0x00000200
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW                 0x00000400
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY                     0x00001000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL                      0x00002000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW                  0x00004000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY                     0x00010000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL                      0x00020000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW                  0x00040000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW                 0x00080000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY                     0x00100000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL                      0x00200000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW                  0x00400000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW                 0x00800000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY                     0x01000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL                      0x02000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW                  0x04000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW                 0x08000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY                     0x10000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL                      0x20000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW                  0x40000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW                 0x80000000
 
 #define REG_DSI_VID_CFG0                                       0x0000000c
 #define DSI_VID_CFG0_VIRT_CHANNEL__MASK                                0x00000003
@@ -318,38 +342,72 @@ static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
 
 #define REG_DSI_DMA_LEN                                                0x00000048
 
-#define REG_DSI_CMD_MDP_STREAM_CTRL                            0x00000054
-#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK                        0x0000003f
-#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT               0
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+#define REG_DSI_CMD_MDP_STREAM0_CTRL                           0x00000054
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK               0x0000003f
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT              0
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val)
 {
-       return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+       return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK;
 }
-#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK          0x00000300
-#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT         8
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK         0x00000300
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT                8
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val)
 {
-       return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+       return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK;
 }
-#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK               0xffff0000
-#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT              16
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK              0xffff0000
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT             16
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val)
 {
-       return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+       return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK;
 }
 
-#define REG_DSI_CMD_MDP_STREAM_TOTAL                           0x00000058
-#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK                 0x00000fff
-#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT                        0
-static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+#define REG_DSI_CMD_MDP_STREAM0_TOTAL                          0x00000058
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK                        0x00000fff
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT               0
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val)
 {
-       return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+       return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK;
 }
-#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK                 0x0fff0000
-#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT                        16
-static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK                        0x0fff0000
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT               16
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val)
 {
-       return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+       return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_CTRL                           0x0000005c
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK               0x0000003f
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT              0
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val)
+{
+       return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK         0x00000300
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT                8
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+       return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK              0xffff0000
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT             16
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val)
+{
+       return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_TOTAL                          0x00000060
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK                        0x0000ffff
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT               0
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val)
+{
+       return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK                        0xffff0000
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT               16
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val)
+{
+       return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK;
 }
 
 #define REG_DSI_ACK_ERR_STATUS                                 0x00000064
@@ -389,6 +447,35 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
 #define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0               0x00001000
 #define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1               0x00010000
 
+#define REG_DSI_LP_TIMER_CTRL                                  0x000000b4
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK                       0x0000ffff
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT                      0
+static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val)
+{
+       return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK;
+}
+#define DSI_LP_TIMER_CTRL_BTA_TO__MASK                         0xffff0000
+#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT                                16
+static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val)
+{
+       return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK;
+}
+
+#define REG_DSI_HS_TIMER_CTRL                                  0x000000b8
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK                       0x0000ffff
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT                      0
+static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val)
+{
+       return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK;
+}
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK               0x000f0000
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT              16
+static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val)
+{
+       return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK;
+}
+#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN                     0x10000000
+
 #define REG_DSI_TIMEOUT_STATUS                                 0x000000bc
 
 #define REG_DSI_CLKOUT_TIMING_CTRL                             0x000000c0
@@ -409,6 +496,19 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND                      0x00000001
 #define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE                      0x00000010
 
+#define REG_DSI_LANE_STATUS                                    0x000000a4
+#define DSI_LANE_STATUS_DLN0_STOPSTATE                         0x00000001
+#define DSI_LANE_STATUS_DLN1_STOPSTATE                         0x00000002
+#define DSI_LANE_STATUS_DLN2_STOPSTATE                         0x00000004
+#define DSI_LANE_STATUS_DLN3_STOPSTATE                         0x00000008
+#define DSI_LANE_STATUS_CLKLN_STOPSTATE                                0x00000010
+#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT                   0x00000100
+#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT                   0x00000200
+#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT                   0x00000400
+#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT                   0x00000800
+#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT                  0x00001000
+#define DSI_LANE_STATUS_DLN0_DIRECTION                         0x00010000
+
 #define REG_DSI_LANE_CTRL                                      0x000000a8
 #define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST                   0x10000000
 
@@ -436,6 +536,21 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
 #define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK                    0x00000200
 
 #define REG_DSI_CLK_STATUS                                     0x0000011c
+#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE                        0x00000001
+#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE                        0x00000002
+#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE                        0x00000004
+#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE                        0x00000008
+#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE                   0x00000010
+#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE                   0x00000020
+#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE                  0x00000040
+#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE                  0x00000080
+#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE                   0x00000100
+#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE                     0x00000200
+#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE                     0x00000400
+#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE                 0x00001000
+#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE                     0x00002000
+#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE                     0x00004000
+#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT                   0x00008000
 #define DSI_CLK_STATUS_PLL_UNLOCKED                            0x00010000
 
 #define REG_DSI_PHY_RESET                                      0x00000128
@@ -444,6 +559,51 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
 #define REG_DSI_T_CLK_PRE_EXTEND                               0x0000017c
 #define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK                  0x00000001
 
+#define REG_DSI_CMD_MODE_MDP_CTRL2                             0x000001b4
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK               0x0000000f
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT              0
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_R_SEL                           0x00000010
+#define DSI_CMD_MODE_MDP_CTRL2_G_SEL                           0x00000020
+#define DSI_CMD_MODE_MDP_CTRL2_B_SEL                           0x00000040
+#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP               0x00000080
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK                  0x00000700
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT                 8
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK            0x00007000
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT           12
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE                      0x00010000
+
+#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL                      0x000001b8
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK          0x0000003f
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT         0
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK    0x00000300
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT   8
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK         0xffff0000
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT                16
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val)
+{
+       return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK;
+}
+
 #define REG_DSI_RDBK_DATA_CTRL                                 0x000001d0
 #define DSI_RDBK_DATA_CTRL_COUNT__MASK                         0x00ff0000
 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT                                16
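The regenerated dsi.xml.h above follows the rnndb convention of a __MASK/__SHIFT pair plus an inline packing helper per register field; a full register value is built by OR-ing those helpers together, as the msm_dsi_host.c hunk further below does for REG_DSI_CMD_MDP_STREAM0_CTRL. A self-contained illustration of that packing style with a made-up field:

/* Illustration of the __MASK/__SHIFT helper pattern; FOO_* is made up */
#include <stdint.h>
#include <stdio.h>

#define FOO_WORD_COUNT__MASK	0xffff0000u
#define FOO_WORD_COUNT__SHIFT	16

static inline uint32_t FOO_WORD_COUNT(uint32_t val)
{
	return (val << FOO_WORD_COUNT__SHIFT) & FOO_WORD_COUNT__MASK;
}

int main(void)
{
	/* packs 0x0400 into bits [31:16], printing 0x4000000 */
	printf("%#x\n", FOO_WORD_COUNT(0x0400));
	return 0;
}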
index 813d69d..f892f2c 100644 (file)
@@ -149,6 +149,25 @@ static const struct msm_dsi_config msm8998_dsi_cfg = {
        .num_dsi = 2,
 };
 
+static const char * const dsi_sdm660_bus_clk_names[] = {
+       "iface", "bus", "core", "core_mmss",
+};
+
+static const struct msm_dsi_config sdm660_dsi_cfg = {
+       .io_offset = DSI_6G_REG_SHIFT,
+       .reg_cfg = {
+               .num = 2,
+               .regs = {
+                       {"vdd", 73400, 32 },    /* 0.9 V */
+                       {"vdda", 12560, 4 },    /* 1.2 V */
+               },
+       },
+       .bus_clk_names = dsi_sdm660_bus_clk_names,
+       .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
+       .io_start = { 0xc994000, 0xc996000 },
+       .num_dsi = 2,
+};
+
 static const char * const dsi_sdm845_bus_clk_names[] = {
        "iface", "bus",
 };
@@ -240,6 +259,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
                &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
                &msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0,
+               &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
                &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
index 217e24a..efd469d 100644 (file)
@@ -18,6 +18,7 @@
 #define MSM_DSI_6G_VER_MINOR_V1_3_1    0x10030001
 #define MSM_DSI_6G_VER_MINOR_V1_4_1    0x10040001
 #define MSM_DSI_6G_VER_MINOR_V1_4_2    0x10040002
+#define MSM_DSI_6G_VER_MINOR_V2_1_0    0x20010000
 #define MSM_DSI_6G_VER_MINOR_V2_2_0    0x20000000
 #define MSM_DSI_6G_VER_MINOR_V2_2_1    0x20020001
 #define MSM_DSI_6G_VER_MINOR_V2_4_1    0x20040001
index 66ca0c0..b17ac6c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_irq.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
@@ -111,6 +112,9 @@ struct msm_dsi_host {
        struct clk *pixel_clk_src;
        struct clk *byte_intf_clk;
 
+       struct opp_table *opp_table;
+       bool has_opp_table;
+
        u32 byte_clk_rate;
        u32 pixel_clk_rate;
        u32 esc_clk_rate;
@@ -512,9 +516,10 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
        DBG("Set clk rates: pclk=%d, byteclk=%d",
                msm_host->mode->clock, msm_host->byte_clk_rate);
 
-       ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+       ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+                                 msm_host->byte_clk_rate);
        if (ret) {
-               pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+               pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
                return ret;
        }
 
@@ -658,6 +663,8 @@ error:
 
 void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
 {
+       /* Drop the performance state vote */
+       dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
        clk_disable_unprepare(msm_host->esc_clk);
        clk_disable_unprepare(msm_host->pixel_clk);
        if (msm_host->byte_intf_clk)
@@ -986,16 +993,16 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
                /* image data and 1 byte write_memory_start cmd */
                wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 
-               dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
-                       DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
-                       DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+               dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+                       DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+                       DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
                                        msm_host->channel) |
-                       DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+                       DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
                                        MIPI_DSI_DCS_LONG_WRITE));
 
-               dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
-                       DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
-                       DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+               dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
+                       DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
+                       DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
        }
 }
 
@@ -1879,6 +1886,19 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
                goto fail;
        }
 
+       msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "byte");
+       if (IS_ERR(msm_host->opp_table))
+               return PTR_ERR(msm_host->opp_table);
+       /* OPP table is optional */
+       ret = dev_pm_opp_of_add_table(&pdev->dev);
+       if (!ret) {
+               msm_host->has_opp_table = true;
+       } else if (ret != -ENODEV) {
+               dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+               dev_pm_opp_put_clkname(msm_host->opp_table);
+               return ret;
+       }
+
        init_completion(&msm_host->dma_comp);
        init_completion(&msm_host->video_comp);
        mutex_init(&msm_host->dev_mutex);
@@ -1914,6 +1934,9 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
        mutex_destroy(&msm_host->cmd_mutex);
        mutex_destroy(&msm_host->dev_mutex);
 
+       if (msm_host->has_opp_table)
+               dev_pm_opp_of_remove_table(&msm_host->pdev->dev);
+       dev_pm_opp_put_clkname(msm_host->opp_table);
        pm_runtime_disable(&msm_host->pdev->dev);
 }
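In the host-driver hunks above, clk_set_rate() on the byte clock is replaced by dev_pm_opp_set_rate(), which picks the matching operating point and casts the associated performance vote, and the link-clock disable path calls it with a rate of 0 to drop that vote. A compact sketch of the enable/disable pairing; the function names are illustrative.

/* OPP-based clock scaling: vote on enable, drop the vote on disable */
#include <linux/device.h>
#include <linux/pm_opp.h>

static int my_link_clk_enable(struct device *dev, unsigned long byte_rate)
{
	/* selects the matching OPP and votes for it */
	return dev_pm_opp_set_rate(dev, byte_rate);
}

static void my_link_clk_disable(struct device *dev)
{
	/* rate 0 drops the performance-state vote */
	dev_pm_opp_set_rate(dev, 0);
}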
 
index 8742653..4e8660c 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index f509ebd..009f5b8 100644 (file)
@@ -499,6 +499,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
 #ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
        { .compatible = "qcom,dsi-phy-14nm",
          .data = &dsi_phy_14nm_cfgs },
+       { .compatible = "qcom,dsi-phy-14nm-660",
+         .data = &dsi_phy_14nm_660_cfgs },
 #endif
 #ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
        { .compatible = "qcom,dsi-phy-10nm",
index 24b294e..ef8672d 100644 (file)
@@ -45,6 +45,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
 
index 1594f14..5194005 100644 (file)
@@ -161,3 +161,21 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
        .io_start = { 0x994400, 0x996400 },
        .num_dsi_phy = 2,
 };
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
+       .type = MSM_DSI_PHY_14NM,
+       .src_pll_truthtable = { {false, false}, {true, false} },
+       .reg_cfg = {
+               .num = 1,
+               .regs = {
+                       {"vcca", 17000, 32},
+               },
+       },
+       .ops = {
+               .enable = dsi_14nm_phy_enable,
+               .disable = dsi_14nm_phy_disable,
+               .init = dsi_14nm_phy_init,
+       },
+       .io_start = { 0xc994400, 0xc996000 },
+       .num_dsi_phy = 2,
+};
index 07c48dd..a384922 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 9cb6e6f..7aed6cf 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 3eff3ea..72c95b6 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 7717d42..85be1b1 100644 (file)
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml                 (    676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml            (  37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml             (  42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml           (  41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index c981cc1..36d98d4 100644 (file)
@@ -120,8 +120,8 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
        return clk;
 }
 
-void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
-               const char *dbgname)
+void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
+                          const char *dbgname, bool quiet)
 {
        struct resource *res;
        unsigned long size;
@@ -133,7 +133,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        if (!res) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
+               if (!quiet)
+                       DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -141,7 +142,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 
        ptr = devm_ioremap(&pdev->dev, res->start, size);
        if (!ptr) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
+               if (!quiet)
+                       DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -151,6 +153,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
        return ptr;
 }
 
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+                         const char *dbgname)
+{
+       return _msm_ioremap(pdev, name, dbgname, false);
+}
+
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
+                               const char *dbgname)
+{
+       return _msm_ioremap(pdev, name, dbgname, true);
+}
+
 void msm_writel(u32 data, void __iomem *addr)
 {
        if (reglog)
@@ -238,10 +252,8 @@ static int msm_drm_uninit(struct device *dev)
 
        /* clean up event worker threads */
        for (i = 0; i < priv->num_crtcs; i++) {
-               if (priv->event_thread[i].thread) {
-                       kthread_destroy_worker(&priv->event_thread[i].worker);
-                       priv->event_thread[i].thread = NULL;
-               }
+               if (priv->event_thread[i].worker)
+                       kthread_destroy_worker(priv->event_thread[i].worker);
        }
 
        msm_gem_shrinker_cleanup(ddev);
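
This hunk and the one below it replace the driver's hand-rolled kthread_run(kthread_worker_fn, ...) event threads with the kthread_create_worker() helper, which allocates the worker, starts its thread and, on teardown, stops both in a single call. A minimal sketch of the resulting lifecycle, with a hypothetical wrapper struct standing in for msm_drm_thread:

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct example_event_thread {
            struct kthread_worker *worker;
    };

    static int example_event_thread_start(struct example_event_thread *ev, int crtc_id)
    {
            ev->worker = kthread_create_worker(0, "crtc_event:%d", crtc_id);
            if (IS_ERR(ev->worker)) {
                    int ret = PTR_ERR(ev->worker);

                    ev->worker = NULL;
                    return ret;
            }
            return 0;
    }

    static void example_event_thread_stop(struct example_event_thread *ev)
    {
            if (ev->worker)
                    kthread_destroy_worker(ev->worker); /* flushes pending work, stops the thread */
    }
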
@@ -504,19 +516,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        for (i = 0; i < priv->num_crtcs; i++) {
                /* initialize event thread */
                priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
-               kthread_init_worker(&priv->event_thread[i].worker);
                priv->event_thread[i].dev = ddev;
-               priv->event_thread[i].thread =
-                       kthread_run(kthread_worker_fn,
-                               &priv->event_thread[i].worker,
-                               "crtc_event:%d", priv->event_thread[i].crtc_id);
-               if (IS_ERR(priv->event_thread[i].thread)) {
+               priv->event_thread[i].worker = kthread_create_worker(0,
+                       "crtc_event:%d", priv->event_thread[i].crtc_id);
+               if (IS_ERR(priv->event_thread[i].worker)) {
                        DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
-                       priv->event_thread[i].thread = NULL;
                        goto err_msm_uninit;
                }
 
-               ret = sched_setscheduler(priv->event_thread[i].thread,
+               ret = sched_setscheduler(priv->event_thread[i].worker->task,
                                         SCHED_FIFO, &param);
                if (ret)
                        dev_warn(dev, "event_thread set priority failed:%d\n",
@@ -1039,75 +1047,70 @@ static struct drm_driver msm_driver = {
        .patchlevel         = MSM_VERSION_PATCHLEVEL,
 };
 
-#ifdef CONFIG_PM_SLEEP
-static int msm_pm_suspend(struct device *dev)
+static int __maybe_unused msm_runtime_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
+       struct msm_mdss *mdss = priv->mdss;
 
-       if (WARN_ON(priv->pm_state))
-               drm_atomic_state_put(priv->pm_state);
+       DBG("");
 
-       priv->pm_state = drm_atomic_helper_suspend(ddev);
-       if (IS_ERR(priv->pm_state)) {
-               int ret = PTR_ERR(priv->pm_state);
-               DRM_ERROR("Failed to suspend dpu, %d\n", ret);
-               return ret;
-       }
+       if (mdss && mdss->funcs)
+               return mdss->funcs->disable(mdss);
 
        return 0;
 }
 
-static int msm_pm_resume(struct device *dev)
+static int __maybe_unused msm_runtime_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
-       int ret;
+       struct msm_mdss *mdss = priv->mdss;
 
-       if (WARN_ON(!priv->pm_state))
-               return -ENOENT;
+       DBG("");
 
-       ret = drm_atomic_helper_resume(ddev, priv->pm_state);
-       if (!ret)
-               priv->pm_state = NULL;
+       if (mdss && mdss->funcs)
+               return mdss->funcs->enable(mdss);
 
-       return ret;
+       return 0;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int msm_runtime_suspend(struct device *dev)
+static int __maybe_unused msm_pm_suspend(struct device *dev)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct msm_drm_private *priv = ddev->dev_private;
-       struct msm_mdss *mdss = priv->mdss;
 
-       DBG("");
+       if (pm_runtime_suspended(dev))
+               return 0;
 
-       if (mdss && mdss->funcs)
-               return mdss->funcs->disable(mdss);
+       return msm_runtime_suspend(dev);
+}
 
-       return 0;
+static int __maybe_unused msm_pm_resume(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return msm_runtime_resume(dev);
 }
 
-static int msm_runtime_resume(struct device *dev)
+static int __maybe_unused msm_pm_prepare(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
-       struct msm_drm_private *priv = ddev->dev_private;
-       struct msm_mdss *mdss = priv->mdss;
 
-       DBG("");
+       return drm_mode_config_helper_suspend(ddev);
+}
 
-       if (mdss && mdss->funcs)
-               return mdss->funcs->enable(mdss);
+static void __maybe_unused msm_pm_complete(struct device *dev)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
 
-       return 0;
+       drm_mode_config_helper_resume(ddev);
 }
-#endif
 
 static const struct dev_pm_ops msm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
        SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
+       .prepare = msm_pm_prepare,
+       .complete = msm_pm_complete,
 };
 
 /*
index e2d6a60..af259b0 100644 (file)
@@ -129,9 +129,8 @@ struct msm_display_info {
 /* Commit/Event thread specific structure */
 struct msm_drm_thread {
        struct drm_device *dev;
-       struct task_struct *thread;
        unsigned int crtc_id;
-       struct kthread_worker worker;
+       struct kthread_worker *worker;
 };
 
 struct msm_drm_private {
@@ -411,6 +410,8 @@ struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
        const char *name);
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                const char *dbgname);
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
+               const char *dbgname);
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
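
msm_ioremap() now has an msm_ioremap_quiet() sibling (both declared above, wrapping the shared _msm_ioremap() helper) that performs the same mapping but suppresses the DRM_DEV_ERROR() messages when the resource is missing, which suits register regions that are legitimately absent on some SoCs. A hedged usage sketch; the "optional_region" name is illustrative only:

    void __iomem *base;

    /* This block may not exist on every SoC, so do not log an error if it is absent. */
    base = msm_ioremap_quiet(pdev, "optional_region", "optional_region");
    if (IS_ERR(base))
            base = NULL;    /* treat the region as simply not present */
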
 
index 38b0c0e..b2f4915 100644 (file)
@@ -996,10 +996,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 
 static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
-               struct drm_gem_object **obj,
-               bool struct_mutex_locked)
+               struct drm_gem_object **obj)
 {
-       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
 
        switch (flags & MSM_BO_CACHE_MASK) {
@@ -1025,15 +1023,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
        INIT_LIST_HEAD(&msm_obj->submit_entry);
        INIT_LIST_HEAD(&msm_obj->vmas);
 
-       if (struct_mutex_locked) {
-               WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-       } else {
-               mutex_lock(&dev->struct_mutex);
-               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
        *obj = &msm_obj->base;
 
        return 0;
@@ -1043,6 +1032,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
        struct msm_drm_private *priv = dev->dev_private;
+       struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
        bool use_vram = false;
        int ret;
@@ -1063,14 +1053,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        if (size == 0)
                return ERR_PTR(-EINVAL);
 
-       ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
+       ret = msm_gem_new_impl(dev, size, flags, &obj);
        if (ret)
                goto fail;
 
+       msm_obj = to_msm_bo(obj);
+
        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
-               struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
                mutex_lock(&msm_obj->lock);
 
@@ -1105,6 +1096,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
        }
 
+       if (struct_mutex_locked) {
+               WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       } else {
+               mutex_lock(&dev->struct_mutex);
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
        return obj;
 
 fail:
@@ -1127,6 +1127,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
 {
+       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj;
        uint32_t size;
@@ -1140,7 +1141,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        size = PAGE_ALIGN(dmabuf->size);
 
-       ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
+       ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
        if (ret)
                goto fail;
 
@@ -1165,6 +1166,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        }
 
        mutex_unlock(&msm_obj->lock);
+
+       mutex_lock(&dev->struct_mutex);
+       list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       mutex_unlock(&dev->struct_mutex);
+
        return obj;
 
 fail:
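
The msm_gem.c hunks above move the inactive_list insertion out of msm_gem_new_impl() and into _msm_gem_new() and msm_gem_import(), after the backing store and VMA setup are complete; presumably this keeps anyone walking the list under struct_mutex from seeing a partially constructed object. The resulting ordering, condensed:

    ret = msm_gem_new_impl(dev, size, flags, &obj);  /* no list manipulation here any more */
    if (ret)
            goto fail;

    /* ... finish pages / VRAM / shmem setup ... */

    mutex_lock(&dev->struct_mutex);
    list_add_tail(&to_msm_bo(obj)->mm_list, &priv->inactive_list);
    mutex_unlock(&dev->struct_mutex);
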
index 86a1386..d564547 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
-#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/devcoredump.h>
 #include <linux/sched/task.h>
@@ -34,7 +33,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
                return PTR_ERR(opp);
 
        if (gpu->funcs->gpu_set_freq)
-               gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+               gpu->funcs->gpu_set_freq(gpu, opp);
        else
                clk_set_rate(gpu->core_clk, *freq);
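
msm_devfreq_target() now hands the dev_pm_opp itself to the backend's gpu_set_freq() hook (see the msm_gpu.h change below) instead of a raw u64 frequency, letting the backend pull whatever it needs from the OPP. A minimal sketch of such a hook; the function name is hypothetical, and dev_pm_opp_get_freq() is the stock OPP accessor:

    static void example_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
    {
            unsigned long freq = dev_pm_opp_get_freq(opp);

            /* A trivial backend would just program the core clock. */
            clk_set_rate(gpu->core_clk, freq);
    }
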
 
@@ -93,7 +92,11 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
        /*
         * Don't set the freq_table or max_state and let devfreq build the table
         * from OPP
+        * After a deferred probe, these may have been left at non-zero values,
+        * so set them back to zero before creating the devfreq device
         */
+       msm_devfreq_profile.freq_table = NULL;
+       msm_devfreq_profile.max_state = 0;
 
        gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
                        &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
index 429cb40..0db117a 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/interconnect.h>
+#include <linux/pm_opp.h>
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
@@ -61,7 +62,7 @@ struct msm_gpu_funcs {
        struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
        int (*gpu_state_put)(struct msm_gpu_state *state);
        unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
-       void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+       void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
        struct msm_gem_address_space *(*create_address_space)
                (struct msm_gpu *gpu, struct platform_device *pdev);
 };
index 001fbf5..a1d94be 100644 (file)
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        queue->flags = flags;
 
        if (priv->gpu) {
-               if (prio >= priv->gpu->nr_rings)
+               if (prio >= priv->gpu->nr_rings) {
+                       kfree(queue);
                        return -EINVAL;
+               }
 
                queue->prio = prio;
        }
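
The msm_submitqueue_create() change is a small memory-leak fix: the queue is allocated before the priority check, so bailing out with -EINVAL used to leak it. The corrected error path, in outline:

    queue = kzalloc(sizeof(*queue), GFP_KERNEL);
    if (!queue)
            return -ENOMEM;

    if (prio >= priv->gpu->nr_rings) {
            kfree(queue);           /* free the allocation before the early return */
            return -EINVAL;
    }
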
index 49e57fb..60586fb 100644 (file)
@@ -32,6 +32,13 @@ nouveau-y += nouveau_vga.o
 
 # DRM - memory management
 nouveau-y += nouveau_bo.o
+nouveau-y += nouveau_bo0039.o
+nouveau-y += nouveau_bo5039.o
+nouveau-y += nouveau_bo74c1.o
+nouveau-y += nouveau_bo85b5.o
+nouveau-y += nouveau_bo9039.o
+nouveau-y += nouveau_bo90b5.o
+nouveau-y += nouveau_boa0b5.o
 nouveau-y += nouveau_gem.o
 nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_svm.o
 nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_dmem.o
index d6e4ae1..5dec1e5 100644 (file)
@@ -76,6 +76,14 @@ config NOUVEAU_DEBUG_MMU
        help
          Say Y here if you want to enable verbose MMU debug output.
 
+config NOUVEAU_DEBUG_PUSH
+       bool "Enable additional push buffer debugging"
+       depends on DRM_NOUVEAU
+       default n
+       help
+         Say Y here if you want to enable verbose push buffer debug output
+         and sanity checks.
+
 config DRM_NOUVEAU_BACKLIGHT
        bool "Support for backlight control"
        depends on DRM_NOUVEAU
index 4989627..6416b69 100644 (file)
@@ -44,6 +44,8 @@
 #include <subdev/bios/pll.h>
 #include <subdev/clk.h>
 
+#include <nvif/push006c.h>
+
 #include <nvif/event.h>
 #include <nvif/cl0046.h>
 
@@ -759,7 +761,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_unpin(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       nvif_notify_fini(&nv_crtc->vblank);
+       nvif_notify_dtor(&nv_crtc->vblank);
        kfree(nv_crtc);
 }
 
@@ -1105,6 +1107,7 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
        struct nouveau_fence_chan *fctx = chan->fence;
        struct nouveau_drm *drm = chan->drm;
        struct drm_device *dev = drm->dev;
+       struct nvif_push *push = chan->chan.push;
        unsigned long flags;
        int ret;
 
@@ -1119,13 +1122,12 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
                goto fail;
 
        /* Emit the pageflip */
-       ret = RING_SPACE(chan, 2);
+       ret = PUSH_WAIT(push, 2);
        if (ret)
                goto fail;
 
-       BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-       OUT_RING  (chan, 0x00000000);
-       FIRE_RING (chan);
+       PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
+       PUSH_KICK(push);
 
        ret = nouveau_fence_new(chan, false, pfence);
        if (ret)
@@ -1155,6 +1157,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        struct nouveau_cli *cli;
        struct nouveau_fence *fence;
        struct nv04_display *dispnv04 = nv04_display(dev);
+       struct nvif_push *push;
        int head = nouveau_crtc(crtc)->index;
        int ret;
 
@@ -1162,6 +1165,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        if (!chan)
                return -ENODEV;
        cli = (void *)chan->user.client;
+       push = chan->chan.push;
 
        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
@@ -1203,18 +1207,14 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Emit a page flip */
        if (swap_interval) {
-               ret = RING_SPACE(chan, 8);
+               ret = PUSH_WAIT(push, 8);
                if (ret)
                        goto fail_unreserve;
 
-               BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
-               OUT_RING  (chan, 0);
-               BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
-               OUT_RING  (chan, head);
-               BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
-               OUT_RING  (chan, 0);
-               BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
-               OUT_RING  (chan, 0);
+               PUSH_NVSQ(push, NV05F, 0x012c, 0);
+               PUSH_NVSQ(push, NV05F, 0x0134, head);
+               PUSH_NVSQ(push, NV05F, 0x0100, 0);
+               PUSH_NVSQ(push, NV05F, 0x0130, 0);
        }
 
        nouveau_bo_ref(new_bo, &dispnv04->image[head]);
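
These nv04 page-flip hunks move from the legacy RING_SPACE()/BEGIN_NV04()/OUT_RING()/FIRE_RING() helpers to the nvif_push interface: PUSH_WAIT() reserves space and can fail, PUSH_NVSQ() emits a method plus its data, and PUSH_KICK() submits the buffer. The basic shape, sketched from the software page-flip emit above:

    struct nvif_push *push = chan->chan.push;
    int ret;

    ret = PUSH_WAIT(push, 2);       /* make sure two dwords fit before emitting */
    if (ret)
            return ret;

    PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
    return PUSH_KICK(push);         /* hand the push buffer to the channel */
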
@@ -1351,7 +1351,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 
        nv04_cursor_init(nv_crtc);
 
-       ret = nvif_notify_init(&disp->disp.object, nv04_crtc_vblank_handler,
+       ret = nvif_notify_ctor(&disp->disp.object, "kmsVbl", nv04_crtc_vblank_handler,
                               false, NV04_DISP_NTFY_VBLANK,
                               &(struct nvif_notify_head_req_v0) {
                                    .head = nv_crtc->index,
index 76be805..900ab69 100644 (file)
@@ -31,6 +31,7 @@
 #include "nouveau_connector.h"
 #include "nouveau_bo.h"
 #include "nouveau_gem.h"
+#include "nouveau_chan.h"
 
 #include <nvif/if0004.h>
 
@@ -178,7 +179,7 @@ nv04_display_destroy(struct drm_device *dev)
 
        nouveau_hw_save_vga_fonts(dev, 0);
 
-       nvif_notify_fini(&disp->flip);
+       nvif_notify_dtor(&disp->flip);
 
        nouveau_display(dev)->priv = NULL;
        kfree(disp);
@@ -214,8 +215,8 @@ nv04_display_create(struct drm_device *dev)
        dev->driver_features &= ~DRIVER_ATOMIC;
 
        /* Request page flip completion event. */
-       if (drm->nvsw.client) {
-               nvif_notify_init(&drm->nvsw, nv04_flip_complete,
+       if (drm->channel) {
+               nvif_notify_ctor(&drm->channel->nvsw, "kmsFlip", nv04_flip_complete,
                                 false, NV04_NVSW_NTFY_UEVENT,
                                 NULL, 0, 0, &disp->flip);
        }
index e7f14f2..085bd3a 100644 (file)
@@ -11,14 +11,10 @@ int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
                     struct nv50_head_atom *);
 void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
                      struct nv50_head_atom *);
-void base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_sema_clr(struct nv50_wndw *);
-void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_ntfy_clr(struct nv50_wndw *);
-void base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_xlut_clr(struct nv50_wndw *);
-void base507c_image_clr(struct nv50_wndw *);
-void base507c_update(struct nv50_wndw *, u32 *);
+int base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_sema_clr(struct nv50_wndw *);
+int base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_xlut_clr(struct nv50_wndw *);
 
 int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
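
From here on, the nv50 display hunks replace the untyped evo_wait()/evo_mthd()/evo_data()/evo_kick() sequences with the nvif_push API plus generated nvhw class headers: PUSH_MTHD() names the method symbolically, NVDEF() and NVVAL() assemble register fields from named definitions instead of magic shifts, and the window/core callbacks change from void to int so push-buffer errors can propagate. A minimal sketch in the style used below (not any one function's exact code):

    struct nvif_push *push = wndw->wndw.push;
    int ret;

    ret = PUSH_WAIT(push, 2);       /* one method header + one data word */
    if (ret)
            return ret;

    /* Symbolic method and named fields, rather than evo_mthd(push, 0x00c0, 1) and raw values. */
    PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, 0x00000000);
    return PUSH_KICK(push);
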
 
index ba20a77..302d4e6 100644 (file)
 
 #include <nvif/cl507c.h>
 #include <nvif/event.h>
+#include <nvif/push507c.h>
 #include <nvif/timer.h>
 
+#include <nvhw/class/cl507c.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane_helper.h>
 
 #include "nouveau_bo.h"
 
-void
+int
 base507c_update(struct nv50_wndw *wndw, u32 *interlock)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0080, 1);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, UPDATE, interlock[NV50_DISP_INTERLOCK_CORE]);
+       return PUSH_KICK(push);
 }
 
-void
+int
 base507c_image_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+                 NVDEF(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING) |
+                 NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0));
+
+       PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, 0x00000000);
+       return 0;
 }
 
-static void
+static int
 base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.mode << 8 |
-                              asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               if (asyw->image.format == 0xca) {
-                       evo_mthd(push, 0x0110, 2);
-                       evo_data(push, 1);
-                       evo_data(push, 0x6400);
-               } else {
-                       evo_mthd(push, 0x0110, 2);
-                       evo_data(push, 0);
-                       evo_data(push, 0);
-               }
-               evo_mthd(push, 0x0800, 5);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_data(push, 0x00000000);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 20 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.kind << 16 |
-                              asyw->image.format << 8);
-               evo_kick(push, &wndw->wndw);
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+                 NVVAL(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+       if (asyw->image.format == NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+               PUSH_MTHD(push, NV507C, SET_PROCESSING,
+                         NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+                                       SET_CONVERSION,
+                         NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+                         NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
+       } else {
+               PUSH_MTHD(push, NV507C, SET_PROCESSING,
+                         NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
        }
+
+       PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NV507C, SURFACE_SET_SIZE(0),
+                 NVVAL(NV507C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV507C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE(0),
+                 NVVAL(NV507C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout) |
+                 NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+                 NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV507C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+                               SURFACE_SET_PARAMS(0),
+                 NVVAL(NV507C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NV507C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+                 NVDEF(NV507C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+                 NVDEF(NV507C, SURFACE_SET_PARAMS, LAYOUT, FRM) |
+                 NVVAL(NV507C, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+                 NVDEF(NV507C, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+       return 0;
 }
 
-void
+int
 base507c_xlut_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x00e0, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+                 NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+       return 0;
 }
 
-void
+int
 base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x00e0, 1);
-               evo_data(push, 0x40000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+                 NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, USE_CORE_LUT));
+       return 0;
 }
 
 int
@@ -115,66 +146,77 @@ base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
                         struct nvif_device *device)
 {
        s64 time = nvif_msec(device, 2000ULL,
-               u32 data = nouveau_bo_rd32(bo, offset / 4);
-               if ((data & 0xc0000000) == 0x40000000)
+               if (NVBO_TD32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0, STATUS, ==, BEGUN))
                        break;
                usleep_range(1, 2);
        );
        return time < 0 ? time : 0;
 }
 
-void
+int
 base507c_ntfy_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x00a4, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+       return 0;
 }
 
-void
+int
 base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 3))) {
-               evo_mthd(push, 0x00a0, 2);
-               evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
-               evo_data(push, asyw->ntfy.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_NOTIFIER_CONTROL,
+                 NVVAL(NV507C, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+                 NVVAL(NV507C, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 2),
+
+                               SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle);
+       return 0;
 }
 
 void
 base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
 {
-       nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+       NVBO_WR32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0,
+                       NVDEF(NV_DISP_BASE_NOTIFIER_1, _0, STATUS, NOT_BEGUN));
 }
 
-void
+int
 base507c_sema_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0094, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+       return 0;
 }
 
-void
+int
 base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 5))) {
-               evo_mthd(push, 0x0088, 4);
-               evo_data(push, asyw->sema.offset);
-               evo_data(push, asyw->sema.acquire);
-               evo_data(push, asyw->sema.release);
-               evo_data(push, asyw->sema.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NV507C, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+                               SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+                               SET_SEMAPHORE_RELEASE, asyw->sema.release,
+                               SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+       return 0;
 }
 
 void
@@ -282,8 +324,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
                return ret;
        }
 
-       ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
-                              false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+       ret = nvif_notify_ctor(&wndw->wndw.base.user, "kmsBaseNtfy",
+                              wndw->notify.func, false,
+                              NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
                               &(struct nvif_notify_uevent_req) {},
                               sizeof(struct nvif_notify_uevent_req),
                               sizeof(struct nvif_notify_uevent_rep),
index f4c0594..18d3409 100644 (file)
  */
 #include "base.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827c.h>
+
+static int
 base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.mode << 8 |
-                              asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               if (asyw->image.format == 0xca) {
-                       evo_mthd(push, 0x0110, 2);
-                       evo_data(push, 1);
-                       evo_data(push, 0x6400);
-               } else {
-                       evo_mthd(push, 0x0110, 2);
-                       evo_data(push, 0);
-                       evo_data(push, 0);
-               }
-               evo_mthd(push, 0x0800, 5);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_data(push, 0x00000000);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 20 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.format << 8);
-               evo_kick(push, &wndw->wndw);
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NV827C, SET_PRESENT_CONTROL,
+                 NVVAL(NV827C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVVAL(NV827C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV827C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+       if (asyw->image.format == NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+               PUSH_MTHD(push, NV827C, SET_PROCESSING,
+                         NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+                                       SET_CONVERSION,
+                         NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+                         NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
+       } else {
+               PUSH_MTHD(push, NV827C, SET_PROCESSING,
+                         NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
        }
+
+       PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+                               SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+                               SURFACE_SET_SIZE(0),
+                 NVVAL(NV827C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV827C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE(0),
+                 NVVAL(NV827C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+                 NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV827C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SURFACE_SET_PARAMS(0),
+                 NVVAL(NV827C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NV827C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+                 NVDEF(NV827C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+                 NVDEF(NV827C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+       return 0;
 }
 
 static const struct nv50_wndw_func
index 224a34c..5396e37 100644 (file)
  */
 #include "base.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907c.h>
+
+static int
 base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 10))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.mode << 8 |
-                              asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               evo_mthd(push, 0x0400, 5);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_data(push, 0x00000000);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 24 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.format << 8);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 10)))
+               return ret;
+
+       PUSH_MTHD(push, NV907C, SET_PRESENT_CONTROL,
+                 NVVAL(NV907C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVDEF(NV907C, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE) |
+                 NVVAL(NV907C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV907C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+       PUSH_MTHD(push, NV907C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+                               SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+                               SURFACE_SET_SIZE(0),
+                 NVVAL(NV907C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV907C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE(0),
+                 NVVAL(NV907C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+                 NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV907C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SURFACE_SET_PARAMS(0),
+                 NVVAL(NV907C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NV907C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+                 NVDEF(NV907C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+                 NVDEF(NV907C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+       return 0;
 }
 
-static void
+static int
 base907c_xlut_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 6))) {
-               evo_mthd(push, 0x00e0, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00e8, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00fc, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 6)))
+               return ret;
+
+       PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+                 NVDEF(NV907C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+       PUSH_MTHD(push, NV907C, SET_OUTPUT_LUT_LO,
+                 NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+       PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, 0x00000000);
+       return 0;
 }
 
-static void
+static int
 base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 6))) {
-               evo_mthd(push, 0x00e0, 3);
-               evo_data(push, asyw->xlut.i.enable << 30 |
-                              asyw->xlut.i.mode << 24);
-               evo_data(push, asyw->xlut.i.offset >> 8);
-               evo_data(push, 0x40000000);
-               evo_mthd(push, 0x00fc, 1);
-               evo_data(push, asyw->xlut.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 6)))
+               return ret;
+
+       PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+                 NVVAL(NV907C, SET_BASE_LUT_LO, ENABLE, asyw->xlut.i.enable) |
+                 NVVAL(NV907C, SET_BASE_LUT_LO, MODE, asyw->xlut.i.mode),
+
+                               SET_BASE_LUT_HI, asyw->xlut.i.offset >> 8,
+
+                               SET_OUTPUT_LUT_LO,
+                 NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, USE_CORE_LUT));
+
+       PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, asyw->xlut.handle);
+       return 0;
 }
 
 static bool
@@ -81,8 +109,12 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
        if (size != 256 && size != 1024)
                return false;
 
-       asyw->xlut.i.mode = size == 1024 ? 4 : 7;
-       asyw->xlut.i.enable = 2;
+       if (size == 1024)
+               asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+       else
+               asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
+       asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE;
        asyw->xlut.i.load = head907d_olut_load;
        return true;
 }
@@ -125,28 +157,35 @@ base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
        }
 }
 
-static void
+static int
 base907c_csc_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0140, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+                 NVDEF(NV907C, SET_CSC_RED2RED, OWNER, CORE));
+       return 0;
 }
 
-static void
+static int
 base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push, i;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-               evo_mthd(push, 0x0140, 12);
-               evo_data(push, asyw->csc.matrix[0] | 0x80000000);
-               for (i = 1; i < 12; i++)
-                       evo_data(push, asyw->csc.matrix[i]);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+                 NVDEF(NV907C, SET_CSC_RED2RED, OWNER, BASE) |
+                 NVVAL(NV907C, SET_CSC_RED2RED, COEFF, asyw->csc.matrix[0]),
+
+                               SET_CSC_GRN2RED, &asyw->csc.matrix[1], 11);
+       return 0;
 }
 
 const struct nv50_wndw_func
index e021cb3..498622c 100644 (file)
@@ -15,15 +15,15 @@ int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
 void nv50_core_del(struct nv50_core **);
 
 struct nv50_core_func {
-       void (*init)(struct nv50_core *);
+       int (*init)(struct nv50_core *);
        void (*ntfy_init)(struct nouveau_bo *, u32 offset);
        int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
        int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
                              struct nvif_device *);
-       void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
+       int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
 
        struct {
-               void (*owner)(struct nv50_core *);
+               int (*owner)(struct nv50_core *);
        } wndw;
 
        const struct nv50_head_func *head;
@@ -31,7 +31,7 @@ struct nv50_core_func {
        const struct nv50_crc_func *crc;
 #endif
        const struct nv50_outp_func {
-               void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
+               int (*ctrl)(struct nv50_core *, int or, u32 ctrl,
                             struct nv50_head_atom *);
                /* XXX: Only used by SORs and PIORs for now */
                void (*get_caps)(struct nv50_disp *,
@@ -42,11 +42,11 @@ struct nv50_core_func {
 int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
                  struct nv50_core **);
-void core507d_init(struct nv50_core *);
+int core507d_init(struct nv50_core *);
 void core507d_ntfy_init(struct nouveau_bo *, u32);
 int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
-void core507d_update(struct nv50_core *, u32 *, bool);
+int core507d_update(struct nv50_core *, u32 *, bool);
 
 extern const struct nv50_outp_func dac507d;
 extern const struct nv50_outp_func sor507d;
@@ -63,8 +63,8 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
 int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
-void corec37d_update(struct nv50_core *, u32 *, bool);
-void corec37d_wndw_owner(struct nv50_core *);
+int corec37d_update(struct nv50_core *, u32 *, bool);
+int corec37d_wndw_owner(struct nv50_core *);
 extern const struct nv50_outp_func sorc37d;
 
 int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
index 1d66f69..ad1f09a 100644 (file)
 #include "head.h"
 
 #include <nvif/cl507d.h>
+#include <nvif/push507c.h>
 #include <nvif/timer.h>
 
+#include <nvhw/class/cl507d.h>
+
 #include "nouveau_bo.h"
 
-void
+int
 core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 5))) {
-               if (ntfy) {
-                       evo_mthd(push, 0x0084, 1);
-                       evo_data(push, 0x80000000 | NV50_DISP_CORE_NTFY);
-               }
-               evo_mthd(push, 0x0080, 2);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
-                              interlock[NV50_DISP_INTERLOCK_OVLY]);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &core->chan);
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       if (ntfy) {
+               PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+                         NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+                         NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+                         NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
        }
+
+       PUSH_MTHD(push, NV507D, UPDATE, interlock[NV50_DISP_INTERLOCK_BASE] |
+                                       interlock[NV50_DISP_INTERLOCK_OVLY] |
+                 NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
+                 NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
+                 NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+       return PUSH_KICK(push);
 }
 
 int
@@ -49,7 +60,7 @@ core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
                        struct nvif_device *device)
 {
        s64 time = nvif_msec(device, 2000ULL,
-               if (nouveau_bo_rd32(bo, offset / 4))
+               if (NVBO_TD32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, ==, TRUE))
                        break;
                usleep_range(1, 2);
        );
@@ -59,32 +70,34 @@ core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
 void
 core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
 {
-       nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+       NVBO_WR32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0,
+                       NVDEF(NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, FALSE));
 }
 
 int
 core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
 {
-       u32 *push = evo_wait(&disp->core->chan, 2);
+       struct nvif_push *push = disp->core->chan.push;
+       int ret;
 
-       if (push) {
-               evo_mthd(push, 0x008c, 1);
-               evo_data(push, 0x0);
-               evo_kick(push, &disp->core->chan);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
 
-       return 0;
+       PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
+       return PUSH_KICK(push);
 }
 
-void
+int
 core507d_init(struct nv50_core *core)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               evo_mthd(push, 0x0088, 1);
-               evo_data(push, core->chan.sync.handle);
-               evo_kick(push, &core->chan);
-       }
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+       return PUSH_KICK(push);
 }
 
 static const struct nv50_core_func
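
The core507d hunks above show the shape every converted channel method now takes: the open-coded evo_wait()/evo_mthd()/evo_data()/evo_kick() sequence becomes PUSH_WAIT()/PUSH_MTHD()/PUSH_KICK() on the channel's struct nvif_push, method words are built from the NVDEF()/NVVAL() field helpers in the nvhw class headers, and the functions return int so a full or stalled push buffer is reported rather than dropped silently. A minimal caller-side sketch of consuming that new return value (illustrative only; the error-handling site and message are assumptions, not code from this patch):

	int ret = core->func->update(core, interlock, true);
	if (ret)
		NV_ERROR(drm, "core channel update failed: %d\n", ret);
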
index ec83189..9035d3a 100644
 #include "head.h"
 
 #include <nvif/class.h>
-#include <nouveau_bo.h>
-
+#include <nvif/pushc37b.h>
 #include <nvif/timer.h>
 
-void
+#include <nvhw/class/clc37d.h>
+
+#include <nouveau_bo.h>
+
+int
 corec37d_wndw_owner(struct nv50_core *core)
 {
+       struct nvif_push *push = core->chan.push;
        const u32 windows = 8; /*XXX*/
-       u32 *push, i;
-       if ((push = evo_wait(&core->chan, 2 * windows))) {
-               for (i = 0; i < windows; i++) {
-                       evo_mthd(push, 0x1000 + (i * 0x080), 1);
-                       evo_data(push, i >> 1);
-               }
-               evo_kick(push, &core->chan);
+       int ret, i;
+
+       if ((ret = PUSH_WAIT(push, windows * 2)))
+               return ret;
+
+       for (i = 0; i < windows; i++) {
+               PUSH_MTHD(push, NVC37D, WINDOW_SET_CONTROL(i),
+                         NVDEF(NVC37D, WINDOW_SET_CONTROL, OWNER, HEAD(i >> 1)));
        }
+
+       return 0;
 }
 
-void
+int
 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 9))) {
-               if (ntfy) {
-                       evo_mthd(push, 0x020c, 1);
-                       evo_data(push, 0x00001000 | NV50_DISP_CORE_NTFY);
-               }
-
-               evo_mthd(push, 0x0218, 2);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS]);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
-               evo_mthd(push, 0x0200, 1);
-               evo_data(push, 0x00000001);
-
-               if (ntfy) {
-                       evo_mthd(push, 0x020c, 1);
-                       evo_data(push, 0x00000000);
-               }
-               evo_kick(push, &core->chan);
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 9)))
+               return ret;
+
+       if (ntfy) {
+               PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+                         NVDEF(NVC37D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+                         NVVAL(NVC37D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 4) |
+                         NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
        }
+
+       PUSH_MTHD(push, NVC37D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+                               SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+       PUSH_MTHD(push, NVC37D, UPDATE, 0x00000001 |
+                 NVDEF(NVC37D, UPDATE, SPECIAL_HANDLING, NONE) |
+                 NVDEF(NVC37D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+       if (ntfy) {
+               PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+                         NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+       }
+
+       return PUSH_KICK(push);
 }
 
 int
 corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
                        struct nvif_device *device)
 {
-       u32 data;
        s64 time = nvif_msec(device, 2000ULL,
-               data = nouveau_bo_rd32(bo, offset / 4 + 0);
-               if ((data & 0xc0000000) == 0x80000000)
+               if (NVBO_TD32(bo, offset, NV_DISP_NOTIFIER, _0, STATUS, ==, FINISHED))
                        break;
                usleep_range(1, 2);
        );
@@ -82,18 +93,19 @@ corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
 void
 corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
 {
-       nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _0,
+                       NVDEF(NV_DISP_NOTIFIER, _0, STATUS, NOT_BEGUN));
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _1, 0);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _2, 0);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _3, 0);
 }
 
 int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
 {
        int ret;
 
-       ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
-                              NULL, 0, &disp->caps);
+       ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
+                              GV100_DISP_CAPS, NULL, 0, &disp->caps);
        if (ret) {
                NV_ERROR(drm,
                         "Failed to init notifier caps region: %d\n",
@@ -112,24 +124,37 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
        return 0;
 }
 
-static void
+static int
 corec37d_init(struct nv50_core *core)
 {
+       struct nvif_push *push = core->chan.push;
        const u32 windows = 8; /*XXX*/
-       u32 *push, i;
-       if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
-               evo_mthd(push, 0x0208, 1);
-               evo_data(push, core->chan.sync.handle);
-               for (i = 0; i < windows; i++) {
-                       evo_mthd(push, 0x1004 + (i * 0x080), 2);
-                       evo_data(push, 0x0000001f);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x1010 + (i * 0x080), 1);
-                       evo_data(push, 0x00127fff);
-               }
-               evo_kick(push, &core->chan);
-               core->assign_windows = true;
+       int ret, i;
+
+       if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
+       for (i = 0; i < windows; i++) {
+               PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, YUV_PACKED422, TRUE),
+
+                                       WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+               PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+                         NVVAL(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_LUT, USAGE_1025) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+                         NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
        }
+
+       core->assign_windows = true;
+       return PUSH_KICK(push);
 }
 
 static const struct nv50_core_func
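
The corec37d conversion keeps the window-ownership logic it replaces: with the eight windows the driver assumes (the /*XXX*/ constant), corec37d_wndw_owner() hands window i to head i >> 1, i.e. two windows per head. Spelled out for reference (a derived illustration, not part of the patch):

	/* window 0,1 -> head 0
	 * window 2,3 -> head 1
	 * window 4,5 -> head 2
	 * window 6,7 -> head 3
	 */
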
index e1c11eb..7587654 100644
 #include "core.h"
 #include "head.h"
 
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
 corec57d_init(struct nv50_core *core)
 {
+       struct nvif_push *push = core->chan.push;
        const u32 windows = 8; /*XXX*/
-       u32 *push, i;
-       if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
-               evo_mthd(push, 0x0208, 1);
-               evo_data(push, core->chan.sync.handle);
-               for (i = 0; i < windows; i++) {
-                       evo_mthd(push, 0x1004 + (i * 0x080), 2);
-                       evo_data(push, 0x0000000f);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x1010 + (i * 0x080), 1);
-                       evo_data(push, 0x00117fff);
-               }
-               evo_kick(push, &core->chan);
-               core->assign_windows = true;
+       int ret, i;
+
+       if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
+       for (i = 0; i < windows; i++) {
+               PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+                                       WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+               PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+                         NVVAL(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+                         NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
        }
+
+       core->assign_windows = true;
+       return PUSH_KICK(push);
 }
 
 static const struct nv50_core_func
index f17fb6d..b8c31b6 100644
@@ -9,6 +9,8 @@
 #include <nvif/cl0002.h>
 #include <nvif/timer.h>
 
+#include <nvhw/class/cl907d.h>
+
 #include "nouveau_drv.h"
 #include "core.h"
 #include "head.h"
@@ -478,10 +480,6 @@ void nv50_crc_atomic_clr(struct nv50_head *head)
        func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
 }
 
-#define NV50_CRC_RASTER_ACTIVE   0
-#define NV50_CRC_RASTER_COMPLETE 1
-#define NV50_CRC_RASTER_INACTIVE 2
-
 static inline int
 nv50_crc_raster_type(enum nv50_crc_source source)
 {
@@ -490,11 +488,11 @@ nv50_crc_raster_type(enum nv50_crc_source source)
        case NV50_CRC_SOURCE_AUTO:
        case NV50_CRC_SOURCE_RG:
        case NV50_CRC_SOURCE_OUTP_ACTIVE:
-               return NV50_CRC_RASTER_ACTIVE;
+               return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
        case NV50_CRC_SOURCE_OUTP_COMPLETE:
-               return NV50_CRC_RASTER_COMPLETE;
+               return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
        case NV50_CRC_SOURCE_OUTP_INACTIVE:
-               return NV50_CRC_RASTER_INACTIVE;
+               return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
        }
 
        return 0;
@@ -510,11 +508,11 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
        struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
        int ret;
 
-       ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, len, &ctx->mem);
+       ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&core->chan.base.user,
+       ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
                               NV50_DISP_HANDLE_CRC_CTX(head, idx),
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
@@ -531,15 +529,15 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
        return 0;
 
 fail_fini:
-       nvif_mem_fini(&ctx->mem);
+       nvif_mem_dtor(&ctx->mem);
        return ret;
 }
 
 static inline void
 nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
 {
-       nvif_object_fini(&ctx->ntfy);
-       nvif_mem_fini(&ctx->mem);
+       nvif_object_dtor(&ctx->ntfy);
+       nvif_mem_dtor(&ctx->mem);
 }
 
 int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
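
A second rename that runs through the whole series is visible in the crc hunks above: the nvif helpers move from *_init/*_fini to *_ctor/*_dtor, and the constructors gain a debug name ("kmsCrcNtfy", "kmsCrcNtfyCtxDma", ...) so allocations can be told apart in debugging output. The general shape, sketched with a placeholder name and assumed error handling:

	/* Constructor takes a debug name; destructor replaces the old _fini. */
	ret = nvif_mem_ctor_map(mmu, "myNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
	if (ret)
		return ret;
	/* ... use ctx->mem ... */
	nvif_mem_dtor(&ctx->mem);
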
index 4bc59e7..4fce871 100644
@@ -50,9 +50,9 @@ struct nv50_crc_atom {
 };
 
 struct nv50_crc_func {
-       void (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type,
-                       struct nv50_crc_notifier_ctx *, u32 wndw);
-       void (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
+       int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type,
+                      struct nv50_crc_notifier_ctx *, u32 wndw);
+       int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
        u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *,
                         enum nv50_crc_source, int idx);
        bool (*ctx_finished)(struct nv50_head *,
@@ -106,26 +106,27 @@ struct nv50_crc_atom {};
 #define nv50_crc_set_source NULL
 
 static inline void nv50_crc_init(struct drm_device *dev) {}
-static inline int nv50_head_crc_late_register(struct nv50_head *) {}
-static inline void
-nv50_crc_handle_vblank(struct nv50_head *head) { return 0; }
+static inline int
+nv50_head_crc_late_register(struct nv50_head *head) { return 0; }
+static inline void nv50_crc_handle_vblank(struct nv50_head *head) {}
 
 static inline int
-nv50_crc_atomic_check_head(struct nv50_head *, struct nv50_head_atom *,
-                          struct nv50_head_atom *) {}
+nv50_crc_atomic_check_head(struct nv50_head *head,
+                          struct nv50_head_atom *asyh,
+                          struct nv50_head_atom *armh) { return 0; }
 static inline void nv50_crc_atomic_check_outp(struct nv50_atom *atom) {}
 static inline void
-nv50_crc_atomic_stop_reporting(struct drm_atomic_state *) {}
+nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state) {}
 static inline void
-nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *) {}
+nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state) {}
 static inline void
-nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *) {}
+nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state) {}
 static inline void
-nv50_crc_atomic_start_reporting(struct drm_atomic_state *) {}
+nv50_crc_atomic_start_reporting(struct drm_atomic_state *state) {}
 static inline void
-nv50_crc_atomic_set(struct nv50_head *, struct nv50_head_atom *) {}
+nv50_crc_atomic_set(struct nv50_head *head, struct nv50_head_atom *state) {}
 static inline void
-nv50_crc_atomic_clr(struct nv50_head *) {}
+nv50_crc_atomic_clr(struct nv50_head *head) {}
 
 #endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
 #endif /* !__NV50_CRC_H__ */
index 92e907d..0fb0fdb 100644
@@ -6,6 +6,10 @@
 #include "disp.h"
 #include "head.h"
 
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
 #define CRC907D_MAX_ENTRIES 255
 
 struct crc907d_notifier {
@@ -18,68 +22,67 @@ struct crc907d_notifier {
        } entries[CRC907D_MAX_ENTRIES];
 } __packed;
 
-static void
+static int
 crc907d_set_src(struct nv50_head *head, int or,
                enum nv50_crc_source_type source,
                struct nv50_crc_notifier_ctx *ctx, u32 wndw)
 {
-       struct drm_crtc *crtc = &head->base.base;
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       const u32 hoff = head->base.index * 0x300;
-       u32 *push;
-       u32 crc_args = 0xfff00000;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+                      NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+                      NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
+                      NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) |
+                      NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+       int ret;
 
        switch (source) {
        case NV50_CRC_SOURCE_TYPE_SOR:
-               crc_args |= (0x00000f0f + or * 16) << 8;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SOR(or));
                break;
        case NV50_CRC_SOURCE_TYPE_PIOR:
-               crc_args |= (0x000000ff + or * 256) << 8;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, PIOR(or));
                break;
        case NV50_CRC_SOURCE_TYPE_DAC:
-               crc_args |= (0x00000ff0 + or) << 8;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, DAC(or));
                break;
        case NV50_CRC_SOURCE_TYPE_RG:
-               crc_args |= (0x00000ff8 + drm_crtc_index(crtc)) << 8;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, RG(i));
                break;
        case NV50_CRC_SOURCE_TYPE_SF:
-               crc_args |= (0x00000f8f + drm_crtc_index(crtc) * 16) << 8;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SF(i));
                break;
        case NV50_CRC_SOURCE_NONE:
-               crc_args |= 0x000fff00;
+               crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE);
                break;
        }
 
-       push = evo_wait(core, 4);
-       if (!push)
-               return;
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
 
        if (source) {
-               evo_mthd(push, 0x0438 + hoff, 1);
-               evo_data(push, ctx->ntfy.handle);
-               evo_mthd(push, 0x0430 + hoff, 1);
-               evo_data(push, crc_args);
+               PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+               PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
        } else {
-               evo_mthd(push, 0x0430 + hoff, 1);
-               evo_data(push, crc_args);
-               evo_mthd(push, 0x0438 + hoff, 1);
-               evo_data(push, 0);
+               PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
+               PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
        }
-       evo_kick(push, core);
+
+       return 0;
 }
 
-static void crc907d_set_ctx(struct nv50_head *head,
-                           struct nv50_crc_notifier_ctx *ctx)
+static int
+crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push = evo_wait(core, 2);
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
 
-       if (!push)
-               return;
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
 
-       evo_mthd(push, 0x0438 + (head->base.index * 0x300), 1);
-       evo_data(push, ctx ? ctx->ntfy.handle : 0);
-       evo_kick(push, core);
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+       return 0;
 }
 
 static u32 crc907d_get_entry(struct nv50_head *head,
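
In crc907d_set_src() the magic crc_args constants become named HEAD_SET_CRC_CONTROL fields. The removed lines document what the indexed enumerants resolve to, e.g. PRIMARY_OUTPUT SOR(or) corresponds to the old (0x00000f0f + or * 16) value placed at bit 8. A conceptual sketch of that relationship (simplified; the real definitions are generated into the nvhw/class headers and are not spelled like this):

	/* Conceptual only: roughly what NVDEF(NV907D, HEAD_SET_CRC_CONTROL,
	 * PRIMARY_OUTPUT, SOR(or)) contributes to the control word.
	 */
	#define CRC_PRIMARY_OUTPUT_SHIFT   8
	#define CRC_PRIMARY_OUTPUT_SOR(or) (0x00000f0f + (or) * 16)

	crc_args |= CRC_PRIMARY_OUTPUT_SOR(or) << CRC_PRIMARY_OUTPUT_SHIFT;
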
index 940cefd..9afe9a8 100644
@@ -6,6 +6,10 @@
 #include "disp.h"
 #include "head.h"
 
+#include <nvif/push507c.h>
+
+#include <nvhw/class/clc37d.h>
+
 #define CRCC37D_MAX_ENTRIES 2047
 
 struct crcc37d_notifier {
@@ -30,62 +34,59 @@ struct crcc37d_notifier {
        } entries[CRCC37D_MAX_ENTRIES];
 } __packed;
 
-static void
+static int
 crcc37d_set_src(struct nv50_head *head, int or,
                enum nv50_crc_source_type source,
                struct nv50_crc_notifier_ctx *ctx, u32 wndw)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       const u32 hoff = head->base.index * 0x400;
-       u32 *push;
-       u32 crc_args;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, wndw) |
+                      NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+                      NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+                      NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+       int ret;
 
        switch (source) {
        case NV50_CRC_SOURCE_TYPE_SOR:
-               crc_args = (0x00000050 + or) << 12;
+               crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
                break;
        case NV50_CRC_SOURCE_TYPE_PIOR:
-               crc_args = (0x00000060 + or) << 12;
+               crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, PIOR(or));
                break;
        case NV50_CRC_SOURCE_TYPE_SF:
-               crc_args = 0x00000030 << 12;
+               crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
                break;
        default:
-               crc_args = 0;
                break;
        }
 
-       push = evo_wait(core, 4);
-       if (!push)
-               return;
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
 
        if (source) {
-               evo_mthd(push, 0x2180 + hoff, 1);
-               evo_data(push, ctx->ntfy.handle);
-               evo_mthd(push, 0x2184 + hoff, 1);
-               evo_data(push, crc_args | wndw);
+               PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+               PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), crc_args);
        } else {
-               evo_mthd(push, 0x2184 + hoff, 1);
-               evo_data(push, 0);
-               evo_mthd(push, 0x2180 + hoff, 1);
-               evo_data(push, 0);
+               PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), 0);
+               PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
        }
 
-       evo_kick(push, core);
+       return 0;
 }
 
-static void crcc37d_set_ctx(struct nv50_head *head,
-                           struct nv50_crc_notifier_ctx *ctx)
+static int
+crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push = evo_wait(core, 2);
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
 
-       if (!push)
-               return;
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
 
-       evo_mthd(push, 0x2180 + (head->base.index * 0x400), 1);
-       evo_data(push, ctx ? ctx->ntfy.handle : 0);
-       evo_kick(push, core);
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+       return 0;
 }
 
 static u32 crcc37d_get_entry(struct nv50_head *head,
index 658a200..54fbd6f 100644
@@ -26,6 +26,8 @@
 #include <nvif/cl507a.h>
 #include <nvif/timer.h>
 
+#include <nvhw/class/cl507a.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
@@ -33,27 +35,37 @@ bool
 curs507a_space(struct nv50_wndw *wndw)
 {
        nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
-               if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
+               if (NVIF_TV32(&wndw->wimm.base.user, NV507A, FREE, COUNT, >=, 4))
                        return true;
        );
+
        WARN_ON(1);
        return false;
 }
 
-static void
+static int
 curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
 {
-       if (curs507a_space(wndw))
-               nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+       struct nvif_object *user = &wndw->wimm.base.user;
+       int ret = nvif_chan_wait(&wndw->wimm, 1);
+       if (ret == 0) {
+               NVIF_WR32(user, NV507A, UPDATE,
+                         NVDEF(NV507A, UPDATE, INTERLOCK_WITH_CORE, DISABLE));
+       }
+       return ret;
 }
 
-static void
+static int
 curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       if (curs507a_space(wndw)) {
-               nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
-                                                        asyw->point.x);
+       struct nvif_object *user = &wndw->wimm.base.user;
+       int ret = nvif_chan_wait(&wndw->wimm, 1);
+       if (ret == 0) {
+               NVIF_WR32(user, NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT,
+                         NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+                         NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
        }
+       return ret;
 }
 
 const struct nv50_wimm_func
@@ -138,8 +150,8 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
        if (*pwndw = wndw, ret)
                return ret;
 
-       ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
-                              sizeof(args), &wndw->wimm.base.user);
+       ret = nvif_object_ctor(&disp->disp->object, "kmsCurs", 0, oclass,
+                              &args, sizeof(args), &wndw->wimm.base.user);
        if (ret) {
                NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
                return ret;
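
The cursor channels converted here (curs507a, and cursc37a below) are PIO channels rather than DMA channels: there is no push buffer, so instead of PUSH_WAIT() they poll the channel's FREE count (see curs507a_space() and the nvif_chan_wait() calls) and then write UPDATE and SET_CURSOR_HOT_SPOT_POINT_OUT directly with NVIF_WR32(). As elsewhere, the hooks now return int, so a stalled cursor channel can be surfaced instead of the write being lost; a sketch assuming the caller holds the window and its wimm function table (not code from the patch):

	ret = wimm->point(wndw, asyw);
	if (ret == 0)
		ret = wimm->update(wndw, interlock);
	if (ret)
		NV_ERROR(drm, "cursor channel stalled: %d\n", ret);
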
index 96dff4f..e39d086 100644
 #include "curs.h"
 #include "atom.h"
 
-static void
+#include <nvhw/class/clc37a.h>
+
+static int
 cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
 {
-       if (curs507a_space(wndw))
-               nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+       struct nvif_object *user = &wndw->wimm.base.user;
+       int ret = nvif_chan_wait(&wndw->wimm, 1);
+       if (ret == 0)
+               NVIF_WR32(user, NVC37A, UPDATE, 0x00000001);
+       return ret;
 }
 
-static void
+static int
 cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       if (curs507a_space(wndw)) {
-               nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
-                                                        asyw->point.x);
+       struct nvif_object *user = &wndw->wimm.base.user;
+       int ret = nvif_chan_wait(&wndw->wimm, 1);
+       if (ret == 0) {
+               NVIF_WR32(user, NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT(0),
+                         NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+                         NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
        }
+       return ret;
 }
 
 static const struct nv50_wimm_func
index 2a10ef7..09de78d 100644
  */
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+static int
 dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
             struct nv50_head_atom *asyh)
 {
-       u32 *push, sync = 0;
-       if ((push = evo_wait(&core->chan, 3))) {
-               if (asyh) {
-                       sync |= asyh->or.nvsync << 1;
-                       sync |= asyh->or.nhsync;
-               }
-               evo_mthd(push, 0x0400 + (or * 0x080), 2);
-               evo_data(push, ctrl);
-               evo_data(push, sync);
-               evo_kick(push, &core->chan);
+       struct nvif_push *push = core->chan.push;
+       u32 sync = 0;
+       int ret;
+
+       if (asyh) {
+               sync |= NVVAL(NV507D, DAC_SET_POLARITY, HSYNC, asyh->or.nhsync);
+               sync |= NVVAL(NV507D, DAC_SET_POLARITY, VSYNC, asyh->or.nvsync);
        }
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, DAC_SET_CONTROL(or), ctrl,
+                               DAC_SET_POLARITY(or), sync);
+       return 0;
 }
 
 const struct nv50_outp_func
index 11e87fa..95efa62 100644
  */
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+static int
 dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
             struct nv50_head_atom *asyh)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               evo_mthd(push, 0x0180 + (or * 0x020), 1);
-               evo_data(push, ctrl);
-               evo_kick(push, &core->chan);
-       }
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, DAC_SET_CONTROL(or), ctrl);
+       return 0;
 }
 
 const struct nv50_outp_func
index cd71b98..64bac67 100644
@@ -41,6 +41,8 @@
 #include <drm/drm_scdc_helper.h>
 #include <drm/drm_vblank.h>
 
+#include <nvif/push507c.h>
+
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 #include <nvif/cl5070.h>
 #include <nvif/event.h>
 #include <nvif/timer.h>
 
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+#include <nvhw/class/cl887d.h>
+#include <nvhw/class/cl907d.h>
+#include <nvhw/class/cl917d.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_gem.h"
@@ -79,8 +88,9 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
        while (oclass[0]) {
                for (i = 0; i < n; i++) {
                        if (sclass[i].oclass == oclass[0]) {
-                               ret = nvif_object_init(disp, 0, oclass[0],
-                                                      data, size, &chan->user);
+                               ret = nvif_object_ctor(disp, "kmsChan", 0,
+                                                      oclass[0], data, size,
+                                                      &chan->user);
                                if (ret == 0)
                                        nvif_object_map(&chan->user, NULL, 0);
                                nvif_object_sclass_put(&sclass);
@@ -97,7 +107,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
 static void
 nv50_chan_destroy(struct nv50_chan *chan)
 {
-       nvif_object_fini(&chan->user);
+       nvif_object_dtor(&chan->user);
 }
 
 /******************************************************************************
@@ -107,12 +117,106 @@ nv50_chan_destroy(struct nv50_chan *chan)
 void
 nv50_dmac_destroy(struct nv50_dmac *dmac)
 {
-       nvif_object_fini(&dmac->vram);
-       nvif_object_fini(&dmac->sync);
+       nvif_object_dtor(&dmac->vram);
+       nvif_object_dtor(&dmac->sync);
 
        nv50_chan_destroy(&dmac->base);
 
-       nvif_mem_fini(&dmac->push);
+       nvif_mem_dtor(&dmac->_push.mem);
+}
+
+static void
+nv50_dmac_kick(struct nvif_push *push)
+{
+       struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+
+       dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+       if (dmac->put != dmac->cur) {
+               /* Push buffer fetches are not coherent with BAR1, we need to ensure
+                * writes have been flushed right through to VRAM before writing PUT.
+                */
+               if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+                       struct nvif_device *device = dmac->base.device;
+                       nvif_wr32(&device->object, 0x070000, 0x00000001);
+                       nvif_msec(device, 2000,
+                               if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+                                       break;
+                       );
+               }
+
+               NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
+               dmac->put = dmac->cur;
+       }
+
+       push->bgn = push->cur;
+}
+
+static int
+nv50_dmac_free(struct nv50_dmac *dmac)
+{
+       u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+       if (get > dmac->cur) /* NVIDIA stays 5 away from GET, do the same. */
+               return get - dmac->cur - 5;
+       return dmac->max - dmac->cur;
+}
+
+static int
+nv50_dmac_wind(struct nv50_dmac *dmac)
+{
+       /* Wait for GET to depart from the beginning of the push buffer to
+        * prevent writing PUT == GET, which would be ignored by HW.
+        */
+       u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+       if (get == 0) {
+               /* Corner-case, HW idle, but non-committed work pending. */
+               if (dmac->put == 0)
+                       nv50_dmac_kick(dmac->push);
+
+               if (nvif_msec(dmac->base.device, 2000,
+                       if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
+                               break;
+               ) < 0)
+                       return -ETIMEDOUT;
+       }
+
+       PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+       dmac->cur = 0;
+       return 0;
+}
+
+static int
+nv50_dmac_wait(struct nvif_push *push, u32 size)
+{
+       struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+       int free;
+
+       if (WARN_ON(size > dmac->max))
+               return -EINVAL;
+
+       dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+       if (dmac->cur + size >= dmac->max) {
+               int ret = nv50_dmac_wind(dmac);
+               if (ret)
+                       return ret;
+
+               push->cur = dmac->_push.mem.object.map.ptr;
+               push->cur = push->cur + dmac->cur;
+               nv50_dmac_kick(push);
+       }
+
+       if (nvif_msec(dmac->base.device, 2000,
+               if ((free = nv50_dmac_free(dmac)) >= size)
+                       break;
+       ) < 0) {
+               WARN_ON(1);
+               return -ETIMEDOUT;
+       }
+
+       push->bgn = dmac->_push.mem.object.map.ptr;
+       push->bgn = push->bgn + dmac->cur;
+       push->cur = push->bgn;
+       push->end = push->cur + free;
+       return 0;
 }
 
 int
@@ -139,13 +243,21 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
        if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
                type |= NVIF_MEM_VRAM;
 
-       ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
+       ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
+                               &dmac->_push.mem);
        if (ret)
                return ret;
 
-       dmac->ptr = dmac->push.object.map.ptr;
+       dmac->ptr = dmac->_push.mem.object.map.ptr;
+       dmac->_push.wait = nv50_dmac_wait;
+       dmac->_push.kick = nv50_dmac_kick;
+       dmac->push = &dmac->_push;
+       dmac->push->bgn = dmac->_push.mem.object.map.ptr;
+       dmac->push->cur = dmac->push->bgn;
+       dmac->push->end = dmac->push->bgn;
+       dmac->max = 0x1000/4 - 1;
 
-       args->pushbuf = nvif_handle(&dmac->push.object);
+       args->pushbuf = nvif_handle(&dmac->_push.mem.object);
 
        ret = nv50_chan_create(device, disp, oclass, head, data, size,
                               &dmac->base);
@@ -155,7 +267,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
        if (!syncbuf)
                return 0;
 
-       ret = nvif_object_init(&dmac->base.user, NV50_DISP_HANDLE_SYNCBUF,
+       ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
@@ -167,7 +279,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&dmac->base.user, NV50_DISP_HANDLE_VRAM,
+       ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
@@ -183,64 +295,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 }
 
 /******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static void
-evo_flush(struct nv50_dmac *dmac)
-{
-       /* Push buffer fetches are not coherent with BAR1, we need to ensure
-        * writes have been flushed right through to VRAM before writing PUT.
-        */
-       if (dmac->push.type & NVIF_MEM_VRAM) {
-               struct nvif_device *device = dmac->base.device;
-               nvif_wr32(&device->object, 0x070000, 0x00000001);
-               nvif_msec(device, 2000,
-                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
-                               break;
-               );
-       }
-}
-
-u32 *
-evo_wait(struct nv50_dmac *evoc, int nr)
-{
-       struct nv50_dmac *dmac = evoc;
-       struct nvif_device *device = dmac->base.device;
-       u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
-
-       mutex_lock(&dmac->lock);
-       if (put + nr >= (PAGE_SIZE / 4) - 8) {
-               dmac->ptr[put] = 0x20000000;
-               evo_flush(dmac);
-
-               nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
-               if (nvif_msec(device, 2000,
-                       if (!nvif_rd32(&dmac->base.user, 0x0004))
-                               break;
-               ) < 0) {
-                       mutex_unlock(&dmac->lock);
-                       pr_err("nouveau: evo channel stalled\n");
-                       return NULL;
-               }
-
-               put = 0;
-       }
-
-       return dmac->ptr + put;
-}
-
-void
-evo_kick(u32 *push, struct nv50_dmac *evoc)
-{
-       struct nv50_dmac *dmac = evoc;
-
-       evo_flush(dmac);
-
-       nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
-       mutex_unlock(&dmac->lock);
-}
-
-/******************************************************************************
  * Output path helpers
  *****************************************************************************/
 static void
@@ -365,8 +419,9 @@ nv50_dac_disable(struct drm_encoder *encoder)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nv50_core *core = nv50_disp(encoder->dev)->core;
+       const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
        if (nv_encoder->crtc)
-               core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
+               core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
        nv_encoder->crtc = NULL;
        nv50_outp_release(nv_encoder);
 }
@@ -378,10 +433,23 @@ nv50_dac_enable(struct drm_encoder *encoder)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
        struct nv50_core *core = nv50_disp(encoder->dev)->core;
+       u32 ctrl = 0;
+
+       switch (nv_crtc->index) {
+       case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
+       case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
+       case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
+       case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
 
        nv50_outp_acquire(nv_encoder, false);
 
-       core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
+       core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
        asyh->or.depth = 0;
 
        nv_encoder->crtc = encoder->crtc;
@@ -586,6 +654,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
                                (0x0100 << nv_crtc->index),
        };
 
+       if (!nv_encoder->audio)
+               return;
+
        nv_encoder->audio = false;
        nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 
@@ -928,10 +999,10 @@ static u8
 nv50_dp_bpc_to_depth(unsigned int bpc)
 {
        switch (bpc) {
-       case  6: return 0x2;
-       case  8: return 0x5;
-       case 10: /* fall-through */
-       default: return 0x6;
+       case  6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
+       case  8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
+       case 10:
+       default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
        }
 }
 
@@ -970,9 +1041,9 @@ nv50_msto_enable(struct drm_encoder *encoder)
                nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
 
        if (mstm->outp->link & 1)
-               proto = 0x8;
+               proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
        else
-               proto = 0x9;
+               proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
 
        mstm->outp->update(mstm->outp, head->base.index, armh, proto,
                           nv50_dp_bpc_to_depth(armh->or.bpc));
@@ -1501,10 +1572,10 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 
        if (!asyh) {
                nv_encoder->ctrl &= ~BIT(head);
-               if (!(nv_encoder->ctrl & 0x0000000f))
+               if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
                        nv_encoder->ctrl = 0;
        } else {
-               nv_encoder->ctrl |= proto << 8;
+               nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
                nv_encoder->ctrl |= BIT(head);
                asyh->or.depth = depth;
        }
@@ -1562,8 +1633,8 @@ nv50_sor_enable(struct drm_encoder *encoder)
        struct nouveau_connector *nv_connector;
        struct nvbios *bios = &drm->vbios;
        bool hda = false;
-       u8 proto = 0xf;
-       u8 depth = 0x0;
+       u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
+       u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
 
        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        nv_encoder->crtc = encoder->crtc;
@@ -1577,7 +1648,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
        switch (nv_encoder->dcb->type) {
        case DCB_OUTPUT_TMDS:
                if (nv_encoder->link & 1) {
-                       proto = 0x1;
+                       proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
                        /* Only enable dual-link if:
                         *  - Need to (i.e. rate > 165MHz)
                         *  - DCB says we can
@@ -1587,15 +1658,15 @@ nv50_sor_enable(struct drm_encoder *encoder)
                        if (mode->clock >= 165000 &&
                            nv_encoder->dcb->duallink_possible &&
                            !drm_detect_hdmi_monitor(nv_connector->edid))
-                               proto |= 0x4;
+                               proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
                } else {
-                       proto = 0x2;
+                       proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
                }
 
                nv50_hdmi_enable(&nv_encoder->base.base, mode);
                break;
        case DCB_OUTPUT_LVDS:
-               proto = 0x0;
+               proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
 
                if (bios->fp_no_ddc) {
                        if (bios->fp.dual_link)
@@ -1629,9 +1700,9 @@ nv50_sor_enable(struct drm_encoder *encoder)
                depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
 
                if (nv_encoder->link & 1)
-                       proto = 0x8;
+                       proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
                else
-                       proto = 0x9;
+                       proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
 
                nv50_audio_enable(encoder, mode);
                break;
@@ -1766,8 +1837,9 @@ nv50_pior_disable(struct drm_encoder *encoder)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nv50_core *core = nv50_disp(encoder->dev)->core;
+       const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
        if (nv_encoder->crtc)
-               core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
+               core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
        nv_encoder->crtc = NULL;
        nv50_outp_release(nv_encoder);
 }
@@ -1779,29 +1851,36 @@ nv50_pior_enable(struct drm_encoder *encoder)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
        struct nv50_core *core = nv50_disp(encoder->dev)->core;
-       u8 owner = 1 << nv_crtc->index;
-       u8 proto;
+       u32 ctrl = 0;
+
+       switch (nv_crtc->index) {
+       case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
+       case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
+       default:
+               WARN_ON(1);
+               break;
+       }
 
        nv50_outp_acquire(nv_encoder, false);
 
        switch (asyh->or.bpc) {
-       case 10: asyh->or.depth = 0x6; break;
-       case  8: asyh->or.depth = 0x5; break;
-       case  6: asyh->or.depth = 0x2; break;
-       default: asyh->or.depth = 0x0; break;
+       case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
+       case  8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
+       case  6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
+       default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
        }
 
        switch (nv_encoder->dcb->type) {
        case DCB_OUTPUT_TMDS:
        case DCB_OUTPUT_DP:
-               proto = 0x0;
+               ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
                break;
        default:
                BUG();
                break;
        }
 
-       core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
+       core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh);
        nv_encoder->crtc = encoder->crtc;
 }
 
@@ -2168,8 +2247,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
        int ret, i;
 
        ret = pm_runtime_get_sync(dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(dev->dev);
                return ret;
+       }
 
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
@@ -2460,7 +2541,7 @@ nv50_display_destroy(struct drm_device *dev)
        nv50_audio_component_fini(nouveau_drm(dev));
 
        nvif_object_unmap(&disp->caps);
-       nvif_object_fini(&disp->caps);
+       nvif_object_dtor(&disp->caps);
        nv50_core_del(&disp->core);
 
        nouveau_bo_unmap(disp->sync);
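
The new nv50_dmac_wait()/nv50_dmac_kick() pair above is the heart of the rework: it implements the push-buffer ring that evo_wait()/evo_kick() used to manage by hand. PUT (the CPU write pointer) chases GET (the GPU read pointer), the writer keeps a few dwords of headroom so that PUT == GET can only ever mean "empty", and when a reservation would run past the end of the buffer nv50_dmac_wind() emits a jump back to offset 0 and waits for GET to move off the start. A self-contained sketch of the free-space rule from nv50_dmac_free(), with the constants taken from the hunk above (standalone C for illustration, not driver code):

	#include <stdint.h>

	#define RING_MAX 1023	/* dmac->max = 0x1000/4 - 1 dwords */
	#define MARGIN   5	/* "NVIDIA stays 5 away from GET, do the same" */

	/* Dwords that may be written at 'cur' before running into GET. */
	static int ring_free(uint32_t get, uint32_t cur)
	{
		if (get > cur)			/* GET is ahead: gap minus margin */
			return (int)(get - cur) - MARGIN;
		return (int)(RING_MAX - cur);	/* otherwise run to the end, wrap later */
	}
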
index 1968c69..92bddc0 100644
@@ -2,6 +2,7 @@
 #define __NV50_KMS_H__
 #include <linux/workqueue.h>
 #include <nvif/mem.h>
+#include <nvif/push.h>
 
 #include "nouveau_display.h"
 
@@ -61,7 +62,8 @@ struct nv50_chan {
 struct nv50_dmac {
        struct nv50_chan base;
 
-       struct nvif_mem push;
+       struct nvif_push _push;
+       struct nvif_push *push;
        u32 *ptr;
 
        struct nvif_object sync;
@@ -71,6 +73,10 @@ struct nv50_dmac {
         * grabbed by evo_wait (if the pushbuf reservation is successful) and
         * dropped again by evo_kick. */
        struct mutex lock;
+
+       u32 cur;
+       u32 put;
+       u32 max;
 };
 
 struct nv50_outp_atom {
@@ -106,18 +112,4 @@ void evo_kick(u32 *, struct nv50_dmac *);
 extern const u64 disp50xx_modifiers[];
 extern const u64 disp90xx_modifiers[];
 extern const u64 wndwc57e_modifiers[];
-
-#define evo_mthd(p, m, s) do {                                         \
-       const u32 _m = (m), _s = (s);                                   \
-       if (drm_debug_enabled(DRM_UT_KMS))                              \
-               pr_err("%04x %d %s\n", _m, _s, __func__);               \
-       *((p)++) = ((_s << 18) | _m);                                   \
-} while(0)
-
-#define evo_data(p, d) do {                                            \
-       const u32 _d = (d);                                             \
-       if (drm_debug_enabled(DRM_UT_KMS))                              \
-               pr_err("\t%08x\n", _d);                                 \
-       *((p)++) = _d;                                                  \
-} while(0)
 #endif
index 9a10ec2..841edfa 100644
@@ -106,9 +106,9 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
                }
        }
 
-       asyh->dither.enable = mode;
-       asyh->dither.bits = mode >> 1;
-       asyh->dither.mode = mode >> 3;
+       asyh->dither.enable = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, ENABLE);
+       asyh->dither.bits = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, BITS);
+       asyh->dither.mode = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, MODE);
        asyh->set.dither = true;
 }
 
@@ -489,7 +489,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
 {
        struct nv50_head *head = nv50_head(crtc);
 
-       nvif_notify_fini(&head->base.vblank);
+       nvif_notify_dtor(&head->base.vblank);
        nv50_lut_fini(&head->olut);
        drm_crtc_cleanup(crtc);
        kfree(head);
@@ -598,7 +598,7 @@ nv50_head_create(struct drm_device *dev, int index)
                }
        }
 
-       ret = nvif_notify_init(&disp->disp->object, nv50_head_vblank_handler,
+       ret = nvif_notify_ctor(&disp->disp->object, "kmsVbl", nv50_head_vblank_handler,
                               false, NV04_DISP_NTFY_VBLANK,
                               &(struct nvif_notify_head_req_v0) {
                                    .head = nv_crtc->index,
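
The head hunk above is the read-side counterpart of the NVVAL()/NVDEF() builders used everywhere else: NVVAL_GET() extracts a named field from a method word, so the old open-coded "mode >> 1" / "mode >> 3" shifts become self-describing. Conceptually all three macros are shift-and-mask helpers keyed by the field definitions in the nvhw class headers; a standalone illustration with a made-up field (not the real generated macros):

	#include <stdint.h>

	/* Hypothetical 3-bit field at bits 3..5 of a control word. */
	#define FIELD_SHIFT 3
	#define FIELD_MASK  0x7u

	static inline uint32_t field_val(uint32_t v)	/* roughly NVVAL */
	{
		return (v & FIELD_MASK) << FIELD_SHIFT;
	}

	static inline uint32_t field_get(uint32_t word)	/* roughly NVVAL_GET */
	{
		return (word >> FIELD_SHIFT) & FIELD_MASK;
	}
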
index 30501ad..dae841d 100644
@@ -25,74 +25,72 @@ void nv50_head_flush_clr(struct nv50_head *head,
                         struct nv50_head_atom *asyh, bool flush);
 
 struct nv50_head_func {
-       void (*view)(struct nv50_head *, struct nv50_head_atom *);
-       void (*mode)(struct nv50_head *, struct nv50_head_atom *);
+       int (*view)(struct nv50_head *, struct nv50_head_atom *);
+       int (*mode)(struct nv50_head *, struct nv50_head_atom *);
        bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
        bool olut_identity;
        int  olut_size;
-       void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
-       void (*olut_clr)(struct nv50_head *);
+       int (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
+       int (*olut_clr)(struct nv50_head *);
        void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
-       void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
-       void (*core_clr)(struct nv50_head *);
+       int (*core_set)(struct nv50_head *, struct nv50_head_atom *);
+       int (*core_clr)(struct nv50_head *);
        int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
                           struct nv50_head_atom *);
        int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
                           struct nv50_head_atom *);
-       void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
-       void (*curs_clr)(struct nv50_head *);
-       void (*base)(struct nv50_head *, struct nv50_head_atom *);
-       void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
-       void (*dither)(struct nv50_head *, struct nv50_head_atom *);
-       void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
-       void (*or)(struct nv50_head *, struct nv50_head_atom *);
+       int (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
+       int (*curs_clr)(struct nv50_head *);
+       int (*base)(struct nv50_head *, struct nv50_head_atom *);
+       int (*ovly)(struct nv50_head *, struct nv50_head_atom *);
+       int (*dither)(struct nv50_head *, struct nv50_head_atom *);
+       int (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+       int (*or)(struct nv50_head *, struct nv50_head_atom *);
        void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
 };
 
 extern const struct nv50_head_func head507d;
-void head507d_view(struct nv50_head *, struct nv50_head_atom *);
-void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
+int head507d_view(struct nv50_head *, struct nv50_head_atom *);
+int head507d_mode(struct nv50_head *, struct nv50_head_atom *);
 bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
 void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
-void head507d_core_clr(struct nv50_head *);
+int head507d_core_clr(struct nv50_head *);
 int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
 int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
-void head507d_base(struct nv50_head *, struct nv50_head_atom *);
-void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
-void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
-void head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
+int head507d_base(struct nv50_head *, struct nv50_head_atom *);
+int head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head507d_dither(struct nv50_head *, struct nv50_head_atom *);
+int head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
 
 extern const struct nv50_head_func head827d;
 
 extern const struct nv50_head_func head907d;
-void head907d_view(struct nv50_head *, struct nv50_head_atom *);
-void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
+int head907d_view(struct nv50_head *, struct nv50_head_atom *);
+int head907d_mode(struct nv50_head *, struct nv50_head_atom *);
 bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
-void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut_clr(struct nv50_head *);
-void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_core_clr(struct nv50_head *);
-void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_curs_clr(struct nv50_head *);
-void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
-void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
-void head907d_or(struct nv50_head *, struct nv50_head_atom *);
+int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_olut_clr(struct nv50_head *);
+int head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_core_clr(struct nv50_head *);
+int head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_curs_clr(struct nv50_head *);
+int head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
+int head907d_or(struct nv50_head *, struct nv50_head_atom *);
 
 extern const struct nv50_head_func head917d;
 int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
 
 extern const struct nv50_head_func headc37d;
-void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_core_clr(struct nv50_head *);
+int headc37d_view(struct nv50_head *, struct nv50_head_atom *);
 int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
-void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_curs_clr(struct nv50_head *);
-void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_clr(struct nv50_head *);
+int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
 void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
 
 extern const struct nv50_head_func headc57d;
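
With every nv50_head_func hook now declared to return int, the head flush paths can propagate a push-buffer failure instead of carrying on with partially programmed state. A sketch of the caller-side pattern this enables (an assumed usage example, not the patch's own flush code):

	if (asyh->set.procamp) {
		ret = head->func->procamp(head, asyh);
		if (ret)
			return ret;
	}
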
index 66ccf36..0edd4e5 100644
 #include "head.h"
 #include "core.h"
 
-void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+int
 head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
-               evo_data(push, asyh->procamp.sat.sin << 20 |
-                              asyh->procamp.sat.cos << 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_PROCAMP(i),
+                 NVDEF(NV507D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+                 NVDEF(NV507D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+                 NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+                 NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+                 NVDEF(NV507D, HEAD_SET_PROCAMP, TRANSITION, HARD));
+       return 0;
 }
 
-void
+int
 head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
-               evo_data(push, asyh->dither.mode << 3 |
-                              asyh->dither.bits << 1 |
-                              asyh->dither.enable);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_DITHER_CONTROL(i),
+                 NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+                 NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+                 NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+                 NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+       return 0;
 }
 
-void
+int
 head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u32 bounds = 0;
-       u32 *push;
+       int ret;
 
        if (asyh->ovly.cpp) {
                switch (asyh->ovly.cpp) {
-               case 4: bounds |= 0x00000300; break;
-               case 2: bounds |= 0x00000100; break;
+               case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+               case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
                default:
                        WARN_ON(1);
                        break;
                }
-               bounds |= 0x00000001;
+               bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
        } else {
-               bounds |= 0x00000100;
+               bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
        }
 
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
-               evo_data(push, bounds);
-               evo_kick(push, core);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+       return 0;
 }
 
-void
+int
 head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u32 bounds = 0;
-       u32 *push;
+       int ret;
 
        if (asyh->base.cpp) {
                switch (asyh->base.cpp) {
-               case 8: bounds |= 0x00000500; break;
-               case 4: bounds |= 0x00000300; break;
-               case 2: bounds |= 0x00000100; break;
-               case 1: bounds |= 0x00000000; break;
+               case 8: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+               case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+               case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+               case 1: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
                default:
                        WARN_ON(1);
                        break;
                }
-               bounds |= 0x00000001;
+               bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
        }
 
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
-               evo_data(push, bounds);
-               evo_kick(push, core);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+       return 0;
 }
 
-static void
+static int
 head507d_curs_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-               evo_data(push, 0x05000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+       return 0;
 }
 
-static void
+static int
 head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 3))) {
-               evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-               evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
-                                           asyh->curs.format << 24);
-               evo_data(push, asyh->curs.offset >> 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+                 NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+                               HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+       return 0;
 }
 
 int
@@ -134,7 +164,7 @@ head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
                     struct nv50_head_atom *asyh)
 {
        switch (asyw->image.format) {
-       case 0xcf: asyh->curs.format = 1; break;
+       case 0xcf: asyh->curs.format = NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8; break;
        default:
                WARN_ON(1);
                return -EINVAL;
@@ -147,54 +177,70 @@ head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
                     struct nv50_head_atom *asyh)
 {
        switch (asyw->image.w) {
-       case 32: asyh->curs.layout = 0; break;
-       case 64: asyh->curs.layout = 1; break;
+       case 32: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+       case 64: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
        default:
                return -EINVAL;
        }
        return 0;
 }
 
-void
+int
 head507d_core_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_CONTEXT_DMA_ISO(i), 0x00000000);
+       return 0;
 }
 
-static void
+static int
 head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 9))) {
-               evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->core.offset >> 8);
-               evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-               evo_data(push, asyh->core.h << 16 | asyh->core.w);
-               evo_data(push, asyh->core.layout << 20 |
-                              (asyh->core.pitch >> 8) << 8 |
-                              asyh->core.blocks << 8 |
-                              asyh->core.blockh);
-               evo_data(push, asyh->core.kind << 16 |
-                              asyh->core.format << 8);
-               evo_data(push, asyh->core.handle);
-               evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->core.y << 16 | asyh->core.x);
-               evo_kick(push, core);
-
-               /* EVO will complain with INVALID_STATE if we have an
-                * active cursor and (re)specify HeadSetContextDmaIso
-                * without also updating HeadSetOffsetCursor.
-                */
-               asyh->set.curs = asyh->curs.visible;
-               asyh->set.olut = asyh->olut.handle != 0;
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 9)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_OFFSET(i, 0),
+                 NVVAL(NV507D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_SIZE(i),
+                 NVVAL(NV507D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+                 NVVAL(NV507D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+                               HEAD_SET_STORAGE(i),
+                 NVVAL(NV507D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+                 NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+                 NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+                 NVVAL(NV507D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+                               HEAD_SET_PARAMS(i),
+                 NVVAL(NV507D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+                 NVVAL(NV507D, HEAD_SET_PARAMS, KIND, asyh->core.kind) |
+                 NVDEF(NV507D, HEAD_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256),
+
+                               HEAD_SET_CONTEXT_DMA_ISO(i),
+                 NVVAL(NV507D, HEAD_SET_CONTEXT_DMA_ISO, HANDLE, asyh->core.handle));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+
+       /* EVO will complain with INVALID_STATE if we have an
+        * active cursor and (re)specify HeadSetContextDmaIso
+        * without also updating HeadSetOffsetCursor.
+        */
+       asyh->set.curs = asyh->curs.visible;
+       asyh->set.olut = asyh->olut.handle != 0;
+       return 0;
 }
 
 void
@@ -221,37 +267,47 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
        asyh->core.handle = disp->core->chan.vram.handle;
        asyh->core.offset = 0;
-       asyh->core.format = 0xcf;
-       asyh->core.kind = 0;
-       asyh->core.layout = 1;
-       asyh->core.blockh = 0;
+       asyh->core.format = NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8;
+       asyh->core.kind = NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH;
+       asyh->core.layout = NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+       asyh->core.blockh = NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
        asyh->core.blocks = 0;
        asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
 }
 
-static void
+static int
 head507d_olut_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+                 NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+       return 0;
 }
 
-static void
+static int
 head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 3))) {
-               evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-               evo_data(push, 0x80000000 | asyh->olut.mode << 30);
-               evo_data(push, asyh->olut.offset >> 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+                 NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+                 NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+                 NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+                               HEAD_SET_BASE_LUT_HI(i),
+                 NVVAL(NV507D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+       return 0;
 }
 
 static void
@@ -278,53 +334,97 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
                return false;
 
        if (asyh->base.cpp == 1)
-               asyh->olut.mode = 0;
+               asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES;
        else
-               asyh->olut.mode = 1;
+               asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES;
 
        asyh->olut.load = head507d_olut_load;
        return true;
 }
 
-void
+int
 head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
        struct nv50_head_mode *m = &asyh->mode;
-       u32 *push;
-       if ((push = evo_wait(core, 13))) {
-               evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
-               evo_data(push, 0x00800000 | m->clock);
-               evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
-               evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
-               evo_data(push, 0x00000000);
-               evo_data(push, m->v.active  << 16 | m->h.active );
-               evo_data(push, m->v.synce   << 16 | m->h.synce  );
-               evo_data(push, m->v.blanke  << 16 | m->h.blanke );
-               evo_data(push, m->v.blanks  << 16 | m->h.blanks );
-               evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
-               evo_data(push, asyh->mode.v.blankus);
-               evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_PIXEL_CLOCK(i),
+                 NVVAL(NV507D, HEAD_SET_PIXEL_CLOCK, FREQUENCY, m->clock) |
+                 NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, MODE, CLK_CUSTOM) |
+                 NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, ADJ1000DIV1001, FALSE) |
+                 NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, NOT_DRIVER, FALSE),
+
+                               HEAD_SET_CONTROL(i),
+                 NVVAL(NV507D, HEAD_SET_CONTROL, STRUCTURE, m->interlace));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_OVERSCAN_COLOR(i),
+                 NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+                 NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+                 NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+                               HEAD_SET_RASTER_SIZE(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+                 NVVAL(NV507D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+                               HEAD_SET_RASTER_SYNC_END(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+                 NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+                               HEAD_SET_RASTER_BLANK_END(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+                 NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+                               HEAD_SET_RASTER_BLANK_START(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+                 NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+                               HEAD_SET_RASTER_VERT_BLANK2(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+                 NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e),
+
+                               HEAD_SET_RASTER_VERT_BLANK_DMI(i),
+                 NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK_DMI, DURATION, m->v.blankus));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+                 NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+                 NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+                 NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
+       return 0;
 }
 
-void
+int
 head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 7))) {
-               evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
-               evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
-               evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
-               evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
-               evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 7)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+                 NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+                 NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+                 NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+       PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+                               HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+                 NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH));
+       return 0;
 }
 
 const struct nv50_head_func
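The 507d hunks above replace hand-rolled shift-and-or words (for example the dither control value) with NVDEF()/NVVAL() field builders taken from the generated class headers. A small stand-alone sketch of the idea, assuming the old 507d dither layout (enable at bit 0, bits at bits 1-2, mode from bit 3); the FIELD/DITHER_* macros here are illustrative only, not the nvhw definitions:

#include <assert.h>
#include <stdint.h>

#define FIELD(val, shift, width) \
	(((uint32_t)(val) & ((1u << (width)) - 1)) << (shift))

#define DITHER_ENABLE(v) FIELD((v), 0, 1)
#define DITHER_BITS(v)   FIELD((v), 1, 2)
#define DITHER_MODE(v)   FIELD((v), 3, 4)

int main(void)
{
	unsigned enable = 1, bits = 2, mode = 3;

	/* old style: magic shifts inline at the call site */
	uint32_t raw = mode << 3 | bits << 1 | enable;

	/* new style: named fields, same encoding */
	uint32_t named = DITHER_MODE(mode) | DITHER_BITS(bits) |
			 DITHER_ENABLE(enable);

	assert(raw == named);
	return 0;
}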
index 1187711..194d177 100644
 #include "head.h"
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827d.h>
+
+static int
 head827d_curs_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-               evo_data(push, 0x05000000);
-               evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+       return 0;
 }
 
-static void
+static int
 head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 5))) {
-               evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-               evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
-                                           asyh->curs.format << 24);
-               evo_data(push, asyh->curs.offset >> 8);
-               evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-               evo_data(push, asyh->curs.handle);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+                 NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+                 NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+                 NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+                 NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+                 NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+                               HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+       return 0;
 }
 
-static void
+static int
 head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 9))) {
-               evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->core.offset >> 8);
-               evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-               evo_data(push, asyh->core.h << 16 | asyh->core.w);
-               evo_data(push, asyh->core.layout << 20 |
-                              (asyh->core.pitch >> 8) << 8 |
-                              asyh->core.blocks << 8 |
-                              asyh->core.blockh);
-               evo_data(push, asyh->core.format << 8);
-               evo_data(push, asyh->core.handle);
-               evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->core.y << 16 | asyh->core.x);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 9)))
+               return ret;
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_OFFSET(i, 0),
+                 NVVAL(NV827D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_SIZE(i),
+                 NVVAL(NV827D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+                 NVVAL(NV827D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+                               HEAD_SET_STORAGE(i),
+                 NVVAL(NV827D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+                 NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+                 NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+                 NVVAL(NV827D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+                               HEAD_SET_PARAMS(i),
+                 NVVAL(NV827D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+                 NVDEF(NV827D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+                 NVDEF(NV827D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+                               HEAD_SET_CONTEXT_DMAS_ISO(i, 0),
+                 NVVAL(NV827D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+                 NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+                 NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+       return 0;
 }
 
-static void
+static int
 head827d_olut_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+                 NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+       return 0;
 }
 
-static void
+static int
 head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 5))) {
-               evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-               evo_data(push, 0x80000000 | asyh->olut.mode << 30);
-               evo_data(push, asyh->olut.offset >> 8);
-               evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-               evo_data(push, asyh->olut.handle);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+                 NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+                 NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+                 NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+                               HEAD_SET_BASE_LUT_HI(i),
+                 NVVAL(NV827D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+       PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+       return 0;
 }
 
 const struct nv50_head_func
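The 827d conversion above follows the same PUSH_WAIT()/PUSH_MTHD() flow as 507d: reserve the dwords first, return the error if that fails, then emit the method header and payload. A toy, self-contained model of that flow (struct toy_push and its header encoding are made up for illustration, not the nvif push API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_push {
	uint32_t buf[64];
	unsigned cur, end;
};

static int toy_wait(struct toy_push *p, unsigned dwords)
{
	if (p->end - p->cur < dwords)
		return -ENOSPC; /* the driver would try to wait/flush first */
	return 0;
}

static void toy_mthd(struct toy_push *p, uint16_t mthd, uint32_t data)
{
	p->buf[p->cur++] = (1u << 18) | mthd; /* toy header: one data word */
	p->buf[p->cur++] = data;
}

int main(void)
{
	struct toy_push push = { .end = 64 };
	int ret;

	if ((ret = toy_wait(&push, 2)))
		return -ret;

	toy_mthd(&push, 0x08a8, 0x00123400); /* e.g. a procamp word */
	printf("queued %u dwords\n", push.cur);
	return 0;
}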
index 63a0b45..8f860e9 100644
 #include "core.h"
 #include "crc.h"
 
-void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+int
 head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 3))) {
-               evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
-               evo_data(push, asyh->or.depth  << 6 |
-                              asyh->or.nvsync << 4 |
-                              asyh->or.nhsync << 3 |
-                              asyh->or.crc_raster);
-               evo_data(push, 0x31ec6000 | head->base.index << 25 |
-                                           asyh->mode.interlace);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, asyh->or.depth),
+
+                               HEAD_SET_CONTROL(i), 0x31ec6000 | head->base.index << 25 |
+                 NVVAL(NV907D, HEAD_SET_CONTROL, STRUCTURE, asyh->mode.interlace));
+       return 0;
 }
 
-void
+int
 head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
-               evo_data(push, asyh->procamp.sat.sin << 20 |
-                              asyh->procamp.sat.cos << 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_PROCAMP(i),
+                 NVDEF(NV907D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+                 NVDEF(NV907D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+                 NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+                 NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+                 NVDEF(NV907D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+                 NVDEF(NV907D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE));
+       return 0;
 }
 
-static void
+static int
 head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
-               evo_data(push, asyh->dither.mode << 3 |
-                              asyh->dither.bits << 1 |
-                              asyh->dither.enable);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_DITHER_CONTROL(i),
+                 NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+                 NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+                 NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+                 NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+       return 0;
 }
 
-void
+int
 head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u32 bounds = 0;
-       u32 *push;
+       int ret;
 
        if (asyh->ovly.cpp) {
                switch (asyh->ovly.cpp) {
-               case 8: bounds |= 0x00000500; break;
-               case 4: bounds |= 0x00000300; break;
-               case 2: bounds |= 0x00000100; break;
+               case 8: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+               case 4: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+               case 2: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
                default:
                        WARN_ON(1);
                        break;
                }
-               bounds |= 0x00000001;
+               bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, USABLE, TRUE);
        } else {
-               bounds |= 0x00000100;
+               bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
        }
 
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
-               evo_data(push, bounds);
-               evo_kick(push, core);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS(i), bounds);
+       return 0;
 }
 
-static void
+static int
 head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u32 bounds = 0;
-       u32 *push;
+       int ret;
 
        if (asyh->base.cpp) {
                switch (asyh->base.cpp) {
-               case 8: bounds |= 0x00000500; break;
-               case 4: bounds |= 0x00000300; break;
-               case 2: bounds |= 0x00000100; break;
-               case 1: bounds |= 0x00000000; break;
+               case 8: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+               case 4: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+               case 2: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+               case 1: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
                default:
                        WARN_ON(1);
                        break;
                }
-               bounds |= 0x00000001;
+               bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
        }
 
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
-               evo_data(push, bounds);
-               evo_kick(push, core);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+       return 0;
 }
 
-void
+int
 head907d_curs_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
-               evo_data(push, 0x05000000);
-               evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+                 NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+                 NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+       return 0;
 }
 
-void
+int
 head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 5))) {
-               evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
-               evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
-                                           asyh->curs.format << 24);
-               evo_data(push, asyh->curs.offset >> 8);
-               evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-               evo_data(push, asyh->curs.handle);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+                 NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+                               HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+       return 0;
 }
 
-void
+int
 head907d_core_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMAS_ISO(i), 0x00000000);
+       return 0;
 }
 
-void
+int
 head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 9))) {
-               evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
-               evo_data(push, asyh->core.offset >> 8);
-               evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
-               evo_data(push, asyh->core.h << 16 | asyh->core.w);
-               evo_data(push, asyh->core.layout << 24 |
-                              (asyh->core.pitch >> 8) << 8 |
-                              asyh->core.blocks << 8 |
-                              asyh->core.blockh);
-               evo_data(push, asyh->core.format << 8);
-               evo_data(push, asyh->core.handle);
-               evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
-               evo_data(push, asyh->core.y << 16 | asyh->core.x);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 9)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_OFFSET(i),
+                 NVVAL(NV907D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_SIZE(i),
+                 NVVAL(NV907D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+                 NVVAL(NV907D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+                               HEAD_SET_STORAGE(i),
+                 NVVAL(NV907D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+                 NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+                 NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+                 NVVAL(NV907D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+                               HEAD_SET_PARAMS(i),
+                 NVVAL(NV907D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+                 NVDEF(NV907D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+                 NVDEF(NV907D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+                               HEAD_SET_CONTEXT_DMAS_ISO(i),
+                 NVVAL(NV907D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_POINT_IN(i),
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+       return 0;
 }
 
-void
+int
 head907d_olut_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x0448 + (head->base.index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+                 NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+       return 0;
 }
 
-void
+int
 head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 5))) {
-               evo_mthd(push, 0x0448 + (head->base.index * 0x300), 2);
-               evo_data(push, 0x80000000 | asyh->olut.mode << 24);
-               evo_data(push, asyh->olut.offset >> 8);
-               evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-               evo_data(push, asyh->olut.handle);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+                 NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, ENABLE) |
+                 NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_LO, MODE, asyh->olut.mode) |
+                 NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, NEVER_YIELD_TO_BASE, DISABLE),
+
+                               HEAD_SET_OUTPUT_LUT_HI(i),
+                 NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+       return 0;
 }
 
 void
@@ -244,52 +305,110 @@ head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
        if (size != 256 && size != 1024)
                return false;
 
-       asyh->olut.mode = size == 1024 ? 4 : 7;
+       if (size == 1024)
+               asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+       else
+               asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
        asyh->olut.load = head907d_olut_load;
        return true;
 }
 
-void
+int
 head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
        struct nv50_head_mode *m = &asyh->mode;
-       u32 *push;
-       if ((push = evo_wait(core, 14))) {
-               evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
-               evo_data(push, 0x00000000);
-               evo_data(push, m->v.active  << 16 | m->h.active );
-               evo_data(push, m->v.synce   << 16 | m->h.synce  );
-               evo_data(push, m->v.blanke  << 16 | m->h.blanke );
-               evo_data(push, m->v.blanks  << 16 | m->h.blanks );
-               evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
-               evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
-               evo_data(push, 0x00000000); /* ??? */
-               evo_data(push, 0xffffff00);
-               evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
-               evo_data(push, m->clock * 1000);
-               evo_data(push, 0x00200000); /* ??? */
-               evo_data(push, m->clock * 1000);
-               evo_kick(push, core);
-       }
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 14)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
+                 NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+                 NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+                 NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+                               HEAD_SET_RASTER_SIZE(i),
+                 NVVAL(NV907D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+                 NVVAL(NV907D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+                               HEAD_SET_RASTER_SYNC_END(i),
+                 NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+                 NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+                               HEAD_SET_RASTER_BLANK_END(i),
+                 NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+                 NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+                               HEAD_SET_RASTER_BLANK_START(i),
+                 NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+                 NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+                               HEAD_SET_RASTER_VERT_BLANK2(i),
+                 NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+                 NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+                 NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+                 NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+                 NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
+
+                               HEAD_SET_CRC_CONTROL(i),
+                 NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+                 NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+                 NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
+                 NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
+                 NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+                 NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
+                 NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, ADJ1000DIV1001, FALSE),
+
+                               HEAD_SET_PIXEL_CLOCK_CONFIGURATION(i),
+                 NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, MODE, CLK_CUSTOM) |
+                 NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, NOT_DRIVER, FALSE) |
+                 NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, ENABLE_HOPPING, FALSE),
+
+                               HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+                 NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000) |
+                 NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, ADJ1000DIV1001, FALSE));
+       return 0;
 }
 
-void
+int
 head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 8))) {
-               evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
-               evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
-               evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
-               evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
-               evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
-               evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 8)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+                 NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+                 NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+                 NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+       PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+                               HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH),
+
+                               HEAD_SET_VIEWPORT_SIZE_OUT_MAX(i),
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, WIDTH, asyh->view.oW) |
+                 NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, HEIGHT, asyh->view.oH));
+       return 0;
 }
 
 const struct nv50_head_func
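One behavioural detail the 907d olut hunk above makes explicit: a 256-entry LUT selects the interpolated 257-unity-range mode, a 1024-entry LUT the 1025-unity-range mode, and every other size is rejected. A stand-alone sketch of that selection (the enum values are placeholders, not the cl907d encodings):

#include <stdbool.h>
#include <stdio.h>

enum { LUT_MODE_INTERPOLATE_257 = 0, LUT_MODE_INTERPOLATE_1025 = 1 };

static bool pick_olut_mode(int size, int *mode)
{
	if (size != 256 && size != 1024)
		return false;
	*mode = (size == 1024) ? LUT_MODE_INTERPOLATE_1025
			       : LUT_MODE_INTERPOLATE_257;
	return true;
}

int main(void)
{
	int mode = 0;

	for (int size = 128; size <= 1024; size *= 2)
		printf("size %4d -> %s\n", size,
		       pick_olut_mode(size, &mode) ?
		       (mode ? "INTERPOLATE_1025" : "INTERPOLATE_257") :
		       "rejected");
	return 0;
}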
index 76958ce..a5d8274 100644
 #include "head.h"
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl917d.h>
+
+static int
 head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
-               evo_data(push, asyh->dither.mode << 3 |
-                              asyh->dither.bits << 1 |
-                              asyh->dither.enable);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV917D, HEAD_SET_DITHER_CONTROL(i),
+                 NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+                 NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+                 NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+                 NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+       return 0;
 }
 
-static void
+static int
 head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u32 bounds = 0;
-       u32 *push;
+       int ret;
 
        if (asyh->base.cpp) {
                switch (asyh->base.cpp) {
-               case 8: bounds |= 0x00000500; break;
-               case 4: bounds |= 0x00000300; break;
-               case 2: bounds |= 0x00000100; break;
-               case 1: bounds |= 0x00000000; break;
+               case 8: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+               case 4: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+               case 2: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+               case 1: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
                default:
                        WARN_ON(1);
                        break;
                }
-               bounds |= 0x00020001;
+               bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+               bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, BASE_LUT, USAGE_1025);
        }
 
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
-               evo_data(push, bounds);
-               evo_kick(push, core);
-       }
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+       return 0;
 }
 
 int
@@ -68,10 +78,10 @@ head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
                     struct nv50_head_atom *asyh)
 {
        switch (asyw->state.fb->width) {
-       case  32: asyh->curs.layout = 0; break;
-       case  64: asyh->curs.layout = 1; break;
-       case 128: asyh->curs.layout = 2; break;
-       case 256: asyh->curs.layout = 3; break;
+       case  32: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+       case  64: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
+       case 128: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128; break;
+       case 256: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256; break;
        default:
                return -EINVAL;
        }
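The 917d cursor-layout hunk above maps the framebuffer width onto one of four named square cursor sizes and returns -EINVAL otherwise. A self-contained sketch of the same mapping (the enum and name table here are illustrative, not the cl917d values):

#include <errno.h>
#include <stdio.h>

enum curs_size { CURS_W32_H32, CURS_W64_H64, CURS_W128_H128, CURS_W256_H256 };

static const char *curs_name[] = {
	"W32_H32", "W64_H64", "W128_H128", "W256_H256"
};

static int curs_layout(int width, enum curs_size *layout)
{
	switch (width) {
	case  32: *layout = CURS_W32_H32;   break;
	case  64: *layout = CURS_W64_H64;   break;
	case 128: *layout = CURS_W128_H128; break;
	case 256: *layout = CURS_W256_H256; break;
	default:
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	static const int widths[] = { 32, 64, 128, 256, 48 };
	enum curs_size layout;

	for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		if (curs_layout(widths[i], &layout))
			printf("width %3d -> rejected (-EINVAL)\n", widths[i]);
		else
			printf("width %3d -> %s\n", widths[i], curs_name[layout]);
	}
	return 0;
}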
index 35fcdf8..63adfeb 100644
 #include "atom.h"
 #include "core.h"
 
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
 headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u8 depth;
-       u32 *push;
-
-       if ((push = evo_wait(core, 2))) {
-               /*XXX: This is a dirty hack until OR depth handling is
-                *     improved later for deep colour etc.
-                */
-               switch (asyh->or.depth) {
-               case 6: depth = 5; break;
-               case 5: depth = 4; break;
-               case 2: depth = 1; break;
-               case 0: depth = 4; break;
-               default:
-                       depth = asyh->or.depth;
-                       WARN_ON(1);
-                       break;
-               }
-
-               evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
-               evo_data(push, depth << 4 |
-                              asyh->or.nvsync << 3 |
-                              asyh->or.nhsync << 2 |
-                              asyh->or.crc_raster);
-               evo_kick(push, core);
+       int ret;
+
+       /*XXX: This is a dirty hack until OR depth handling is
+        *     improved later for deep colour etc.
+        */
+       switch (asyh->or.depth) {
+       case 6: depth = 5; break;
+       case 5: depth = 4; break;
+       case 2: depth = 1; break;
+       case 0: depth = 4; break;
+       default:
+               depth = asyh->or.depth;
+               WARN_ON(1);
+               break;
        }
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE));
+       return 0;
 }
 
-static void
+static int
 headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x80000000 |
-                              asyh->procamp.sat.sin << 16 |
-                              asyh->procamp.sat.cos << 4);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_PROCAMP(i),
+                 NVDEF(NVC37D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+                 NVDEF(NVC37D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+                 NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+                 NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+                 NVDEF(NVC37D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+                 NVDEF(NVC37D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE) |
+                 NVDEF(NVC37D, HEAD_SET_PROCAMP, BLACK_LEVEL, GRAPHICS));
+       return 0;
 }
 
-void
+int
 headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1);
-               evo_data(push, asyh->dither.mode << 8 |
-                              asyh->dither.bits << 4 |
-                              asyh->dither.enable);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_DITHER_CONTROL(i),
+                 NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+                 NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+                 NVDEF(NVC37D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+                 NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+                 NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+       return 0;
 }
 
-void
+int
 headc37d_curs_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x209c + head->base.index * 0x400, 1);
-               evo_data(push, 0x000000cf);
-               evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), 0x00000000);
+       return 0;
 }
 
-void
+int
 headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 7))) {
-               evo_mthd(push, 0x209c + head->base.index * 0x400, 2);
-               evo_data(push, 0x80000000 |
-                              asyh->curs.layout << 8 |
-                              asyh->curs.format << 0);
-               evo_data(push, 0x000072ff);
-               evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->curs.handle);
-               evo_mthd(push, 0x2090 + head->base.index * 0x400, 1);
-               evo_data(push, asyh->curs.offset >> 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 7)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, DE_GAMMA, NONE),
+
+                               HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+                                                                    K1) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+                                                                    NEG_K1_TIMES_SRC) |
+                 NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), asyh->curs.handle);
+       PUSH_MTHD(push, NVC37D, HEAD_SET_OFFSET_CURSOR(i, 0), asyh->curs.offset >> 8);
+       return 0;
 }
 
 int
@@ -123,32 +158,38 @@ headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
        return 0;
 }
 
-static void
+static int
 headc37d_olut_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), 0x00000000);
+       return 0;
 }
 
-static void
+static int
 headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3);
-               evo_data(push, asyh->olut.output_mode << 8 |
-                              asyh->olut.range << 4 |
-                              asyh->olut.size);
-               evo_data(push, asyh->olut.offset >> 8);
-               evo_data(push, asyh->olut.handle);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT(i),
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, SIZE, asyh->olut.size) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, RANGE, asyh->olut.range) |
+                 NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, OUTPUT_MODE, asyh->olut.output_mode),
+
+                               HEAD_SET_OFFSET_OUTPUT_LUT(i), asyh->olut.offset >> 8,
+                               HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), asyh->olut.handle);
+       return 0;
 }
 
 static bool
@@ -157,51 +198,77 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
        if (size != 256 && size != 1024)
                return false;
 
-       asyh->olut.mode = 2;
-       asyh->olut.size = size == 1024 ? 2 : 0;
-       asyh->olut.range = 0;
-       asyh->olut.output_mode = 1;
+       asyh->olut.size = size == 1024 ? NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 :
+                                        NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257;
+       asyh->olut.range = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY;
+       asyh->olut.output_mode = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE;
        asyh->olut.load = head907d_olut_load;
        return true;
 }
 
-static void
+static int
 headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
        struct nv50_head_mode *m = &asyh->mode;
-       u32 *push;
-       if ((push = evo_wait(core, 13))) {
-               evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
-               evo_data(push, (m->v.active  << 16) | m->h.active );
-               evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-               evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-               evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-               evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-               evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
-               evo_data(push, m->interlace);
-               evo_data(push, m->clock * 1000);
-               evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
-               evo_data(push, m->clock * 1000);
-               /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
-               evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000124);
-               evo_kick(push, core);
-       }
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 15)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_RASTER_SIZE(i),
+                 NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+                 NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+                               HEAD_SET_RASTER_SYNC_END(i),
+                 NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+                 NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+                               HEAD_SET_RASTER_BLANK_END(i),
+                 NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+                 NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+                               HEAD_SET_RASTER_BLANK_START(i),
+                 NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+                 NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+       //XXX:
+       PUSH_NVSQ(push, NVC37D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+       PUSH_NVSQ(push, NVC37D, 0x2008 + (i * 0x400), m->interlace);
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+                 NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+                 NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+       /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+       PUSH_MTHD(push, NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+                 NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+                 NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_LUT, USAGE_1025) |
+                 NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+       return 0;
 }
 
-void
+int
 headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x204c + (head->base.index * 0x400), 1);
-               evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
-               evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1);
-               evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+                 NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+                 NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+       PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+                 NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+                 NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+       return 0;
 }
 
 void
index c7d04dd..fd51527 100644
 #include "atom.h"
 #include "core.h"
 
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
 headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
        u8 depth;
-       u32 *push;
-
-       if ((push = evo_wait(core, 2))) {
-               /*XXX: This is a dirty hack until OR depth handling is
-                *     improved later for deep colour etc.
-                */
-               switch (asyh->or.depth) {
-               case 6: depth = 5; break;
-               case 5: depth = 4; break;
-               case 2: depth = 1; break;
-               case 0: depth = 4; break;
-               default:
-                       depth = asyh->or.depth;
-                       WARN_ON(1);
-                       break;
-               }
+       int ret;
 
-               evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
-               evo_data(push, 0xfc000000 |
-                              depth << 4 |
-                              asyh->or.nvsync << 3 |
-                              asyh->or.nhsync << 2 |
-                              asyh->or.crc_raster);
-               evo_kick(push, core);
+       /*XXX: This is a dirty hack until OR depth handling is
+        *     improved later for deep colour etc.
+        */
+       switch (asyh->or.depth) {
+       case 6: depth = 5; break;
+       case 5: depth = 4; break;
+       case 2: depth = 1; break;
+       case 0: depth = 4; break;
+       default:
+               depth = asyh->or.depth;
+               WARN_ON(1);
+               break;
        }
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+                 NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+                 NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+                 NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+                 NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+                 NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+                 NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+       return 0;
 }
 
-static void
+static int
 headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
-#if 0
-               evo_data(push, 0x80000000 |
-                              asyh->procamp.sat.sin << 16 |
-                              asyh->procamp.sat.cos << 4);
-#else
-               evo_data(push, 0);
-#endif
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       //TODO:
+       PUSH_MTHD(push, NVC57D, HEAD_SET_PROCAMP(i),
+                 NVDEF(NVC57D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+                 NVDEF(NVC57D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+                 NVDEF(NVC57D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+       return 0;
 }
 
-void
+static int
 headc57d_olut_clr(struct nv50_head *head)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 2))) {
-               evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_OLUT(i), 0x00000000);
+       return 0;
 }
 
-void
+static int
 headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-       u32 *push;
-       if ((push = evo_wait(core, 4))) {
-               evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
-               evo_data(push, asyh->olut.size << 8 |
-                              asyh->olut.mode << 2 |
-                              asyh->olut.output_mode);
-               evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
-               evo_data(push, asyh->olut.handle);
-               evo_data(push, asyh->olut.offset >> 8);
-               evo_kick(push, core);
-       }
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_OLUT_CONTROL(i),
+                 NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+                 NVDEF(NVC57D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+                 NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+                 NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+                               HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff,
+                               HEAD_SET_CONTEXT_DMA_OLUT(i), asyh->olut.handle,
+                               HEAD_SET_OFFSET_OLUT(i), asyh->olut.offset >> 8);
+       return 0;
 }
 
 static void
@@ -161,9 +175,9 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
        if (size != 0 && size != 256 && size != 1024)
                return false;
 
-       asyh->olut.mode = 2; /* DIRECT10 */
+       asyh->olut.mode = NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10;
        asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
-       asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+       asyh->olut.output_mode = NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE;
        if (size == 256)
                asyh->olut.load = headc57d_olut_load_8;
        else
@@ -171,29 +185,50 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
        return true;
 }
 
-static void
+static int
 headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
        struct nv50_head_mode *m = &asyh->mode;
-       u32 *push;
-       if ((push = evo_wait(core, 13))) {
-               evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
-               evo_data(push, (m->v.active  << 16) | m->h.active );
-               evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-               evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-               evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-               evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-               evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
-               evo_data(push, m->interlace);
-               evo_data(push, m->clock * 1000);
-               evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
-               evo_data(push, m->clock * 1000);
-               /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
-               evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
-               evo_data(push, 0x00001014);
-               evo_kick(push, core);
-       }
+       const int i = head->base.index;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 15)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_RASTER_SIZE(i),
+                 NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+                 NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+                               HEAD_SET_RASTER_SYNC_END(i),
+                 NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+                 NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+                               HEAD_SET_RASTER_BLANK_END(i),
+                 NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+                 NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+                               HEAD_SET_RASTER_BLANK_START(i),
+                 NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+                 NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+       //XXX:
+       PUSH_NVSQ(push, NVC57D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+       PUSH_NVSQ(push, NVC57D, 0x2008 + (i * 0x400), m->interlace);
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+                 NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+       PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+                 NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+       /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+       PUSH_MTHD(push, NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+                 NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+                 NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+                 NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+                 NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+       return 0;
 }
 
 const struct nv50_head_func
index 4e95ca5..6b2ad1e 100644
@@ -60,7 +60,7 @@ nv50_lut_fini(struct nv50_lut *lut)
 {
        int i;
        for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
-               nvif_mem_fini(&lut->mem[i]);
+               nvif_mem_dtor(&lut->mem[i]);
 }
 
 int
@@ -70,8 +70,8 @@ nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
        const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
        int i;
        for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
-               int ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, size * 8,
-                                           &lut->mem[i]);
+               int ret = nvif_mem_ctor_map(mmu, "kmsLut", NVIF_MEM_VRAM,
+                                           size * 8, &lut->mem[i]);
                if (ret)
                        return ret;
        }
index 2ee404b..a6c3a9b 100644
@@ -33,8 +33,8 @@ oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
        struct nv50_disp *disp = nv50_disp(drm->dev);
        int ret;
 
-       ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
-                              sizeof(args), &wndw->wimm.base.user);
+       ret = nvif_object_ctor(&disp->disp->object, "kmsOvim", 0, oclass,
+                              &args, sizeof(args), &wndw->wimm.base.user);
        if (ret) {
                NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
                return ret;
index 4869d52..6ae1fbe 100644
@@ -10,11 +10,7 @@ int ovly507e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
                     struct nv50_head_atom *);
 void ovly507e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
                      struct nv50_head_atom *);
-void ovly507e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void ovly507e_ntfy_clr(struct nv50_wndw *);
-void ovly507e_image_clr(struct nv50_wndw *);
-void ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void ovly507e_update(struct nv50_wndw *, u32 *);
+int ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 extern const u32 ovly827e_format[];
 void ovly827e_ntfy_reset(struct nouveau_bo *, u32);
index 4cce107..afd6c72 100644
 
 #include <nvif/cl507e.h>
 #include <nvif/event.h>
+#include <nvif/push507c.h>
 
-void
-ovly507e_update(struct nv50_wndw *wndw, u32 *interlock)
-{
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0080, 1);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
-               evo_kick(push, &wndw->wndw);
-       }
-}
+#include <nvhw/class/cl507e.h>
 
-void
+int
 ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x00e0, 3);
-               evo_data(push, asyw->scale.sy << 16 | asyw->scale.sx);
-               evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw);
-               evo_data(push, asyw->scale.dw);
-               evo_kick(push, &wndw->wndw);
-       }
-}
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
 
-void
-ovly507e_image_clr(struct nv50_wndw *wndw)
-{
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NV507E, SET_POINT_IN,
+                 NVVAL(NV507E, SET_POINT_IN, X, asyw->scale.sx) |
+                 NVVAL(NV507E, SET_POINT_IN, Y, asyw->scale.sy),
+
+                               SET_SIZE_IN,
+                 NVVAL(NV507E, SET_SIZE_IN, WIDTH, asyw->scale.sw) |
+                 NVVAL(NV507E, SET_SIZE_IN, HEIGHT, asyw->scale.sh),
+
+                               SET_SIZE_OUT,
+                 NVVAL(NV507E, SET_SIZE_OUT, WIDTH, asyw->scale.dw));
+       return 0;
 }
 
-static void
+static int
 ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 12))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               evo_mthd(push, 0x0100, 1);
-               evo_data(push, 0x00000002);
-               evo_mthd(push, 0x0800, 1);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_mthd(push, 0x0808, 3);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 20 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.kind << 16 |
-                              asyw->image.format << 8 |
-                              asyw->image.colorspace);
-               evo_kick(push, &wndw->wndw);
-       }
-}
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
 
-void
-ovly507e_ntfy_clr(struct nv50_wndw *wndw)
-{
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x00a4, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
-}
+       if ((ret = PUSH_WAIT(push, 12)))
+               return ret;
 
-void
-ovly507e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 3))) {
-               evo_mthd(push, 0x00a0, 2);
-               evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
-               evo_data(push, asyw->ntfy.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       PUSH_MTHD(push, NV507E, SET_PRESENT_CONTROL,
+                 NVDEF(NV507E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+                 NVVAL(NV507E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV507E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+       PUSH_MTHD(push, NV507E, SET_COMPOSITION_CONTROL,
+                 NVDEF(NV507E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+       PUSH_MTHD(push, NV507E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NV507E, SURFACE_SET_SIZE,
+                 NVVAL(NV507E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV507E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE,
+                 NVVAL(NV507E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+                 NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV507E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SURFACE_SET_PARAMS,
+                 NVVAL(NV507E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVVAL(NV507E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+                 NVVAL(NV507E, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+                 NVDEF(NV507E, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+       return 0;
 }
 
 void
@@ -146,14 +123,14 @@ static const struct nv50_wndw_func
 ovly507e = {
        .acquire = ovly507e_acquire,
        .release = ovly507e_release,
-       .ntfy_set = ovly507e_ntfy_set,
-       .ntfy_clr = ovly507e_ntfy_clr,
+       .ntfy_set = base507c_ntfy_set,
+       .ntfy_clr = base507c_ntfy_clr,
        .ntfy_reset = base507c_ntfy_reset,
        .ntfy_wait_begun = base507c_ntfy_wait_begun,
        .image_set = ovly507e_image_set,
-       .image_clr = ovly507e_image_clr,
+       .image_clr = base507c_image_clr,
        .scale_set = ovly507e_scale_set,
-       .update = ovly507e_update,
+       .update = base507c_update,
 };
 
 static const u32
@@ -192,7 +169,8 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
                return ret;
        }
 
-       ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, false,
+       ret = nvif_notify_ctor(&wndw->wndw.base.user, "kmsOvlyNtfy",
+                              wndw->notify.func, false,
                               NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT,
                               &(struct nvif_notify_uevent_req) {},
                               sizeof(struct nvif_notify_uevent_req),
index 4f7ce57..02dc02d 100644
 
 #include <nouveau_bo.h>
 
+#include <nvif/push507c.h>
 #include <nvif/timer.h>
 
-static void
+#include <nvhw/class/cl827e.h>
+
+static int
 ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 12))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               evo_mthd(push, 0x0100, 1);
-               evo_data(push, 0x00000002);
-               evo_mthd(push, 0x0800, 1);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_mthd(push, 0x0808, 3);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 20 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.format << 8 |
-                              asyw->image.colorspace);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 12)))
+               return ret;
+
+       PUSH_MTHD(push, NV827E, SET_PRESENT_CONTROL,
+                 NVDEF(NV827E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+                 NVVAL(NV827E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV827E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+       PUSH_MTHD(push, NV827E, SET_COMPOSITION_CONTROL,
+                 NVDEF(NV827E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+       PUSH_MTHD(push, NV827E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NV827E, SURFACE_SET_SIZE,
+                 NVVAL(NV827E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV827E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE,
+                 NVVAL(NV827E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+                 NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV827E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SURFACE_SET_PARAMS,
+                 NVVAL(NV827E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVVAL(NV827E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+       return 0;
 }
 
 int
@@ -56,8 +70,7 @@ ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
                         struct nvif_device *device)
 {
        s64 time = nvif_msec(device, 2000ULL,
-               u32 data = nouveau_bo_rd32(bo, offset / 4 + 3);
-               if ((data & 0xffff0000) == 0xffff0000)
+               if (NVBO_TD32(bo, offset, NV_DISP_NOTIFICATION_1, _3, STATUS, ==, BEGUN))
                        break;
                usleep_range(1, 2);
        );
@@ -67,24 +80,25 @@ ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
 void
 ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
 {
-       nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
-       nouveau_bo_wr32(bo, offset / 4 + 3, 0x80000000);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_0, 0);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_1, 0);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _2, 0);
+       NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _3,
+                       NVDEF(NV_DISP_NOTIFICATION_1, _3, STATUS, NOT_BEGUN));
 }
 
 static const struct nv50_wndw_func
 ovly827e = {
        .acquire = ovly507e_acquire,
        .release = ovly507e_release,
-       .ntfy_set = ovly507e_ntfy_set,
-       .ntfy_clr = ovly507e_ntfy_clr,
+       .ntfy_set = base507c_ntfy_set,
+       .ntfy_clr = base507c_ntfy_clr,
        .ntfy_reset = ovly827e_ntfy_reset,
        .ntfy_wait_begun = ovly827e_ntfy_wait_begun,
        .image_set = ovly827e_image_set,
-       .image_clr = ovly507e_image_clr,
+       .image_clr = base507c_image_clr,
        .scale_set = ovly507e_scale_set,
-       .update = ovly507e_update,
+       .update = base507c_update,
 };
 
 const u32
index 9efe5e9..645130d 100644
 #include "ovly.h"
 #include "atom.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907e.h>
+
+static int
 ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 12))) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, asyw->image.interval << 4);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, asyw->image.handle[0]);
-               evo_mthd(push, 0x0100, 1);
-               evo_data(push, 0x00000002);
-               evo_mthd(push, 0x0400, 1);
-               evo_data(push, asyw->image.offset[0] >> 8);
-               evo_mthd(push, 0x0408, 3);
-               evo_data(push, asyw->image.h << 16 | asyw->image.w);
-               evo_data(push, asyw->image.layout << 24 |
-                              (asyw->image.pitch[0] >> 8) << 8 |
-                              asyw->image.blocks[0] << 8 |
-                              asyw->image.blockh);
-               evo_data(push, asyw->image.format << 8 |
-                              asyw->image.colorspace);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 12)))
+               return ret;
+
+       PUSH_MTHD(push, NV907E, SET_PRESENT_CONTROL,
+                 NVDEF(NV907E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+                 NVVAL(NV907E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+       PUSH_MTHD(push, NV907E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+       PUSH_MTHD(push, NV907E, SET_COMPOSITION_CONTROL,
+                 NVDEF(NV907E, SET_COMPOSITION_CONTROL, MODE, OPAQUE));
+
+       PUSH_MTHD(push, NV907E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NV907E, SURFACE_SET_SIZE,
+                 NVVAL(NV907E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NV907E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SURFACE_SET_STORAGE,
+                 NVVAL(NV907E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+                 NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NV907E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SURFACE_SET_PARAMS,
+                 NVVAL(NV907E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVVAL(NV907E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+       return 0;
 }
 
 const struct nv50_wndw_func
 ovly907e = {
        .acquire = ovly507e_acquire,
        .release = ovly507e_release,
-       .ntfy_set = ovly507e_ntfy_set,
-       .ntfy_clr = ovly507e_ntfy_clr,
+       .ntfy_set = base507c_ntfy_set,
+       .ntfy_clr = base507c_ntfy_clr,
        .ntfy_reset = ovly827e_ntfy_reset,
        .ntfy_wait_begun = ovly827e_ntfy_wait_begun,
        .image_set = ovly907e_image_set,
-       .image_clr = ovly507e_image_clr,
+       .image_clr = base507c_image_clr,
        .scale_set = ovly507e_scale_set,
-       .update = ovly507e_update,
+       .update = base507c_update,
 };
 
 static const u32
index 45d8ce7..17d2302 100644
  */
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
 pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
              struct nv50_head_atom *asyh)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               if (asyh) {
-                       ctrl |= asyh->or.depth  << 16;
-                       ctrl |= asyh->or.nvsync << 13;
-                       ctrl |= asyh->or.nhsync << 12;
-               }
-               evo_mthd(push, 0x0700 + (or * 0x040), 1);
-               evo_data(push, ctrl);
-               evo_kick(push, &core->chan);
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if (asyh) {
+               ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+               ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+               ctrl |= NVVAL(NV837D, PIOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
        }
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, PIOR_SET_CONTROL(or), ctrl);
+       return 0;
 }
 
 static void
index 9a59fa7..ca73d77 100644
  */
 #include "core.h"
 
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
 sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
             struct nv50_head_atom *asyh)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               if (asyh) {
-                       ctrl |= asyh->or.depth  << 16;
-                       ctrl |= asyh->or.nvsync << 13;
-                       ctrl |= asyh->or.nhsync << 12;
-               }
-               evo_mthd(push, 0x0600 + (or * 0x40), 1);
-               evo_data(push, ctrl);
-               evo_kick(push, &core->chan);
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if (asyh) {
+               ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+               ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+               ctrl |= NVVAL(NV837D, SOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
        }
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV507D, SOR_SET_CONTROL(or), ctrl);
+       return 0;
 }
 
 static void
index 9577ccf..c86cd8f 100644
  */
 #include "core.h"
 
-#include <nouveau_bo.h>
 #include <nvif/class.h>
+#include <nvif/push507c.h>
 
-static void
+#include <nvhw/class/cl907d.h>
+
+#include <nouveau_bo.h>
+
+static int
 sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
             struct nv50_head_atom *asyh)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               evo_mthd(push, 0x0200 + (or * 0x20), 1);
-               evo_data(push, ctrl);
-               evo_kick(push, &core->chan);
-       }
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NV907D, SOR_SET_CONTROL(or), ctrl);
+       return 0;
 }
 
 static void
 sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
 {
+       struct nouveau_bo *bo = disp->sync;
        const int off = or * 2;
-       u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
-
-       outp->caps.dp_interlace = !!(tmp & 0x04000000);
+       outp->caps.dp_interlace =
+               NVBO_RV32(bo, off, NV907D_CORE_NOTIFIER_3, CAPABILITIES_CAP_SOR0_20, DP_INTERLACE);
 }
 
 const struct nv50_outp_func
index c86ca95..9eaef34 100644
  */
 #include "core.h"
 
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
 sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
             struct nv50_head_atom *asyh)
 {
-       u32 *push;
-       if ((push = evo_wait(&core->chan, 2))) {
-               evo_mthd(push, 0x0300 + (or * 0x20), 1);
-               evo_data(push, ctrl);
-               evo_kick(push, &core->chan);
-       }
+       struct nvif_push *push = core->chan.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37D, SOR_SET_CONTROL(or), ctrl);
+       return 0;
 }
 
 static void
index f7dbd96..685b708 100644
 #include "wndw.h"
 
 #include <nvif/clc37b.h>
+#include <nvif/pushc37b.h>
 
-static void
+#include <nvhw/class/clc37b.h>
+
+static int
 wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wimm, 2))) {
-               evo_mthd(push, 0x0200, 1);
-               if (interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)
-                       evo_data(push, 0x00000003);
-               else
-                       evo_data(push, 0x00000001);
-               evo_kick(push, &wndw->wimm);
-       }
+       struct nvif_push *push = wndw->wimm.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37B, UPDATE, 0x00000001 |
+                 NVVAL(NVC37B, UPDATE, INTERLOCK_WITH_WINDOW,
+                       !!(interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)));
+       return PUSH_KICK(push);
 }
 
-static void
+static int
 wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wimm, 2))) {
-               evo_mthd(push, 0x0208, 1);
-               evo_data(push, asyw->point.y << 16 | asyw->point.x);
-               evo_kick(push, &wndw->wimm);
-       }
+       struct nvif_push *push = wndw->wimm.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37B, SET_POINT_OUT(0),
+                 NVVAL(NVC37B, SET_POINT_OUT, X, asyw->point.x) |
+                 NVVAL(NVC37B, SET_POINT_OUT, Y, asyw->point.y));
+       return 0;
 }
 
 static const struct nv50_wimm_func
index 293ccfd..447ecc9 100644
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507e.h>
+#include <nvhw/class/clc37e.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
 
@@ -35,7 +39,7 @@
 static void
 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
 {
-       nvif_object_fini(&ctxdma->object);
+       nvif_object_dtor(&ctxdma->object);
        list_del(&ctxdma->head);
        kfree(ctxdma);
 }
@@ -94,8 +98,8 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
                argc += sizeof(args.gf119);
        }
 
-       ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
-                              &args, argc, &ctxdma->object);
+       ret = nvif_object_ctor(wndw->ctxdma.parent, "kmsFbCtxDma", handle,
+                              NV_DMA_IN_MEMORY, &args, argc, &ctxdma->object);
        if (ret) {
                nv50_wndw_ctxdma_del(ctxdma);
                return ERR_PTR(ret);
@@ -137,7 +141,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
                    struct nv50_wndw_atom *asyw)
 {
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
-               asyw->image.mode = 0;
+               asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
                asyw->image.interval = 1;
        }
 
@@ -201,13 +205,18 @@ static int
 nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
 {
        switch (asyw->state.fb->format->format) {
-       case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
-       case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
+       case DRM_FORMAT_YUYV:
+               asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8;
+               break;
+       case DRM_FORMAT_UYVY:
+               asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8;
+               break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }
-       asyw->image.colorspace = 1;
+
+       asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601;
        return 0;
 }
 
@@ -215,24 +224,41 @@ static int
 nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
 {
        switch (asyw->state.fb->format->format) {
-       case DRM_FORMAT_C8           : asyw->image.format = 0x1e; break;
-       case DRM_FORMAT_XRGB8888     :
-       case DRM_FORMAT_ARGB8888     : asyw->image.format = 0xcf; break;
-       case DRM_FORMAT_RGB565       : asyw->image.format = 0xe8; break;
-       case DRM_FORMAT_XRGB1555     :
-       case DRM_FORMAT_ARGB1555     : asyw->image.format = 0xe9; break;
-       case DRM_FORMAT_XBGR2101010  :
-       case DRM_FORMAT_ABGR2101010  : asyw->image.format = 0xd1; break;
-       case DRM_FORMAT_XBGR8888     :
-       case DRM_FORMAT_ABGR8888     : asyw->image.format = 0xd5; break;
-       case DRM_FORMAT_XRGB2101010  :
-       case DRM_FORMAT_ARGB2101010  : asyw->image.format = 0xdf; break;
+       case DRM_FORMAT_C8:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_I8;
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8;
+               break;
+       case DRM_FORMAT_RGB565:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5;
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10;
+               break;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               asyw->image.format = NVC37E_SET_PARAMS_FORMAT_A2R10G10B10;
+               break;
        case DRM_FORMAT_XBGR16161616F:
-       case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
+       case DRM_FORMAT_ABGR16161616F:
+               asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16;
+               break;
        default:
                return -EINVAL;
        }
-       asyw->image.colorspace = 0;
+
+       asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB;
        return 0;
 }
 
@@ -265,7 +291,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                }
 
                if (asyw->image.kind) {
-                       asyw->image.layout = 0;
+                       asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR;
                        if (drm->client.device.info.chipset >= 0xc0)
                                asyw->image.blockh = tile_mode >> 4;
                        else
@@ -273,8 +299,8 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                        asyw->image.blocks[0] = fb->pitches[0] / 64;
                        asyw->image.pitch[0] = 0;
                } else {
-                       asyw->image.layout = 1;
-                       asyw->image.blockh = 0;
+                       asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+                       asyw->image.blockh = NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
                        asyw->image.blocks[0] = 0;
                        asyw->image.pitch[0] = fb->pitches[0];
                }
@@ -283,7 +309,12 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                        asyw->image.interval = 1;
                else
                        asyw->image.interval = 0;
-               asyw->image.mode = asyw->image.interval ? 0 : 1;
+
+               if (asyw->image.interval)
+                       asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
+               else
+                       asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE;
+
                asyw->set.image = wndw->func->image_set != NULL;
        }
 
@@ -303,17 +334,17 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                asyw->blend.k1 = asyw->state.alpha >> 8;
                switch (asyw->state.pixel_blend_mode) {
                case DRM_MODE_BLEND_PREMULTI:
-                       asyw->blend.src_color = 2; /* K1 */
-                       asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+                       asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+                       asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
                        break;
                case DRM_MODE_BLEND_COVERAGE:
-                       asyw->blend.src_color = 5; /* K1_TIMES_SRC */
-                       asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+                       asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC;
+                       asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
                        break;
                case DRM_MODE_BLEND_PIXEL_NONE:
                default:
-                       asyw->blend.src_color = 2; /* K1 */
-                       asyw->blend.dst_color = 4; /* NEG_K1 */
+                       asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+                       asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1;
                        break;
                }
                if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
@@ -609,7 +640,7 @@ nv50_wndw_destroy(struct drm_plane *plane)
                nv50_wndw_ctxdma_del(ctxdma);
        }
 
-       nvif_notify_fini(&wndw->notify);
+       nvif_notify_dtor(&wndw->notify);
        nv50_dmac_destroy(&wndw->wimm);
        nv50_dmac_destroy(&wndw->wndw);
 
index a7412b9..3278e28 100644
@@ -57,48 +57,59 @@ struct nv50_wndw_func {
        void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
                        struct nv50_wndw_atom *asyw);
 
-       void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*sema_clr)(struct nv50_wndw *);
+       int (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*sema_clr)(struct nv50_wndw *);
        void (*ntfy_reset)(struct nouveau_bo *, u32 offset);
-       void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*ntfy_clr)(struct nv50_wndw *);
+       int (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*ntfy_clr)(struct nv50_wndw *);
        int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
                               struct nvif_device *);
        bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
        void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
                    const struct drm_color_ctm *);
-       void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*csc_clr)(struct nv50_wndw *);
+       int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*csc_clr)(struct nv50_wndw *);
        bool ilut_identity;
        int  ilut_size;
        bool olut_core;
-       void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*xlut_clr)(struct nv50_wndw *);
-       void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*image_clr)(struct nv50_wndw *);
-       void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-       void (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-
-       void (*update)(struct nv50_wndw *, u32 *interlock);
+       int (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*xlut_clr)(struct nv50_wndw *);
+       int (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*image_clr)(struct nv50_wndw *);
+       int (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+       int (*update)(struct nv50_wndw *, u32 *interlock);
 };
 
 extern const struct drm_plane_funcs nv50_wndw;
 
 void base507c_ntfy_reset(struct nouveau_bo *, u32);
+int base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_ntfy_clr(struct nv50_wndw *);
 int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+int base507c_image_clr(struct nv50_wndw *);
+int base507c_update(struct nv50_wndw *, u32 *);
 
 void base907c_csc(struct nv50_wndw *, struct nv50_wndw_atom *,
                  const struct drm_color_ctm *);
 
 struct nv50_wimm_func {
-       void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       int (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
-       void (*update)(struct nv50_wndw *, u32 *interlock);
+       int (*update)(struct nv50_wndw *, u32 *interlock);
 };
 
 extern const struct nv50_wimm_func curs507a;
 bool curs507a_space(struct nv50_wndw *);
 
+static inline __must_check int
+nvif_chan_wait(struct nv50_dmac *dmac, u32 size)
+{
+       struct nv50_wndw *wndw = container_of(dmac, typeof(*wndw), wimm);
+       return curs507a_space(wndw) ? 0 : -ETIMEDOUT;
+}
+
 int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
                 struct nv50_wndw **);
 int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
@@ -108,13 +119,13 @@ int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
                     struct nv50_head_atom *);
 void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
                      struct nv50_head_atom *);
-void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_sema_clr(struct nv50_wndw *);
-void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_ntfy_clr(struct nv50_wndw *);
-void wndwc37e_image_clr(struct nv50_wndw *);
-void wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_update(struct nv50_wndw *, u32 *);
+int wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_sema_clr(struct nv50_wndw *);
+int wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_ntfy_clr(struct nv50_wndw *);
+int wndwc37e_image_clr(struct nv50_wndw *);
+int wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_update(struct nv50_wndw *, u32 *);
 
 int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
                 struct nv50_wndw **);
index bb84e4d..57df997 100644
 #include <nouveau_bo.h>
 
 #include <nvif/clc37e.h>
+#include <nvif/pushc37b.h>
 
-static void
+#include <nvhw/class/clc37e.h>
+
+static int
 wndwc37e_csc_clr(struct nv50_wndw *wndw)
 {
+       return 0;
 }
 
-static void
+static int
 wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push, i;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-                evo_mthd(push, 0x02bc, 12);
-                for (i = 0; i < 12; i++)
-                         evo_data(push, asyw->csc.matrix[i]);
-                evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CSC_RED2RED, asyw->csc.matrix, 12);
+       return 0;
 }
 
-static void
+static int
 wndwc37e_ilut_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x02b8, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_INPUT_LUT, 0x00000000);
+       return 0;
 }
 
-static void
+static int
 wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x02b0, 3);
-               evo_data(push, asyw->xlut.i.output_mode << 8 |
-                              asyw->xlut.i.range << 4 |
-                              asyw->xlut.i.size);
-               evo_data(push, asyw->xlut.i.offset >> 8);
-               evo_data(push, asyw->xlut.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CONTROL_INPUT_LUT,
+                 NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, OUTPUT_MODE, asyw->xlut.i.output_mode) |
+                 NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, RANGE, asyw->xlut.i.range) |
+                 NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, SIZE, asyw->xlut.i.size),
+
+                               SET_OFFSET_INPUT_LUT, asyw->xlut.i.offset >> 8,
+                               SET_CONTEXT_DMA_INPUT_LUT, asyw->xlut.handle);
+       return 0;
 }
 
 static bool
@@ -77,144 +88,206 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
        if (size != 256 && size != 1024)
                return false;
 
-       asyw->xlut.i.mode = 2;
-       asyw->xlut.i.size = size == 1024 ? 2 : 0;
-       asyw->xlut.i.range = 0;
-       asyw->xlut.i.output_mode = 1;
+       asyw->xlut.i.size = size == 1024 ? NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 :
+                                          NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257;
+       asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
+       asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE;
        asyw->xlut.i.load = head907d_olut_load;
        return true;
 }
 
-void
+int
 wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 8))) {
-               evo_mthd(push, 0x02ec, 7);
-               evo_data(push, asyw->blend.depth << 4);
-               evo_data(push, asyw->blend.k1);
-               evo_data(push, asyw->blend.dst_color << 12 |
-                              asyw->blend.dst_color << 8 |
-                              asyw->blend.src_color << 4 |
-                              asyw->blend.src_color);
-               evo_data(push, 0xffff0000);
-               evo_data(push, 0xffff0000);
-               evo_data(push, 0xffff0000);
-               evo_data(push, 0xffff0000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 8)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_COMPOSITION_CONTROL,
+                 NVDEF(NVC37E, SET_COMPOSITION_CONTROL, COLOR_KEY_SELECT, DISABLE) |
+                 NVVAL(NVC37E, SET_COMPOSITION_CONTROL, DEPTH, asyw->blend.depth),
+
+                               SET_COMPOSITION_CONSTANT_ALPHA,
+                 NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K1, asyw->blend.k1) |
+                 NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K2, 0),
+
+                               SET_COMPOSITION_FACTOR_SELECT,
+                 NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_MATCH_SELECT,
+                                                              asyw->blend.src_color) |
+                 NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_NO_MATCH_SELECT,
+                                                              asyw->blend.src_color) |
+                 NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_MATCH_SELECT,
+                                                              asyw->blend.dst_color) |
+                 NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_NO_MATCH_SELECT,
+                                                              asyw->blend.dst_color),
+
+                               SET_KEY_ALPHA,
+                 NVVAL(NVC37E, SET_KEY_ALPHA, MIN, 0x0000) |
+                 NVVAL(NVC37E, SET_KEY_ALPHA, MAX, 0xffff),
+
+                               SET_KEY_RED_CR,
+                 NVVAL(NVC37E, SET_KEY_RED_CR, MIN, 0x0000) |
+                 NVVAL(NVC37E, SET_KEY_RED_CR, MAX, 0xffff),
+
+                               SET_KEY_GREEN_Y,
+                 NVVAL(NVC37E, SET_KEY_GREEN_Y, MIN, 0x0000) |
+                 NVVAL(NVC37E, SET_KEY_GREEN_Y, MAX, 0xffff),
+
+                               SET_KEY_BLUE_CB,
+                 NVVAL(NVC37E, SET_KEY_BLUE_CB, MIN, 0x0000) |
+                 NVVAL(NVC37E, SET_KEY_BLUE_CB, MAX, 0xffff));
+       return 0;
 }
 
-void
+int
 wndwc37e_image_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x0308, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0240, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+                 NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+                 NVDEF(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), 0x00000000);
+       return 0;
 }
 
-static void
+static int
 wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-
-       if (!(push = evo_wait(&wndw->wndw, 17)))
-               return;
-
-       evo_mthd(push, 0x0308, 1);
-       evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
-       evo_mthd(push, 0x0224, 4);
-       evo_data(push, asyw->image.h << 16 | asyw->image.w);
-       evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
-       evo_data(push, asyw->csc.valid << 17 |
-                      asyw->image.colorspace << 8 |
-                      asyw->image.format);
-       evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
-       evo_mthd(push, 0x0240, 1);
-       evo_data(push, asyw->image.handle[0]);
-       evo_mthd(push, 0x0260, 1);
-       evo_data(push, asyw->image.offset[0] >> 8);
-       evo_mthd(push, 0x0290, 1);
-       evo_data(push, (asyw->state.src_y >> 16) << 16 |
-                      (asyw->state.src_x >> 16));
-       evo_mthd(push, 0x0298, 1);
-       evo_data(push, (asyw->state.src_h >> 16) << 16 |
-                      (asyw->state.src_w >> 16));
-       evo_mthd(push, 0x02a4, 1);
-       evo_data(push, asyw->state.crtc_h << 16 |
-                      asyw->state.crtc_w);
-       evo_kick(push, &wndw->wndw);
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 17)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+                 NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+                 NVVAL(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVDEF(NVC37E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+       PUSH_MTHD(push, NVC37E, SET_SIZE,
+                 NVVAL(NVC37E, SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NVC37E, SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SET_STORAGE,
+                 NVVAL(NVC37E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NVC37E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SET_PARAMS,
+                 NVVAL(NVC37E, SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVVAL(NVC37E, SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+                 NVDEF(NVC37E, SET_PARAMS, INPUT_RANGE, BYPASS) |
+                 NVDEF(NVC37E, SET_PARAMS, UNDERREPLICATE, DISABLE) |
+                 NVDEF(NVC37E, SET_PARAMS, DE_GAMMA, NONE) |
+                 NVVAL(NVC37E, SET_PARAMS, CSC, asyw->csc.valid) |
+                 NVDEF(NVC37E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+                 NVDEF(NVC37E, SET_PARAMS, SWAP_UV, DISABLE),
+
+                               SET_PLANAR_STORAGE(0),
+                 NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+       PUSH_MTHD(push, NVC37E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NVC37E, SET_POINT_IN(0),
+                 NVVAL(NVC37E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+                 NVVAL(NVC37E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+       PUSH_MTHD(push, NVC37E, SET_SIZE_IN,
+                 NVVAL(NVC37E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+                 NVVAL(NVC37E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+       PUSH_MTHD(push, NVC37E, SET_SIZE_OUT,
+                 NVVAL(NVC37E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+                 NVVAL(NVC37E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+       return 0;
 }
 
-void
+int
 wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x021c, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+       return 0;
 }
 
-void
+int
 wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 3))) {
-               evo_mthd(push, 0x021c, 2);
-               evo_data(push, asyw->ntfy.handle);
-               evo_data(push, asyw->ntfy.offset | asyw->ntfy.awaken);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 3)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle,
+
+                               SET_NOTIFIER_CONTROL,
+                 NVVAL(NVC37E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+                 NVVAL(NVC37E, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 4));
+       return 0;
 }
 
-void
+int
 wndwc37e_sema_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0218, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+       return 0;
 }
 
-void
+int
 wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 5))) {
-               evo_mthd(push, 0x020c, 4);
-               evo_data(push, asyw->sema.offset);
-               evo_data(push, asyw->sema.acquire);
-               evo_data(push, asyw->sema.release);
-               evo_data(push, asyw->sema.handle);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+                               SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+                               SET_SEMAPHORE_RELEASE, asyw->sema.release,
+                               SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+       return 0;
 }
 
-void
+int
 wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 5))) {
-               evo_mthd(push, 0x0370, 2);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
-                              interlock[NV50_DISP_INTERLOCK_CORE]);
-               evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
-               evo_mthd(push, 0x0200, 1);
-               if (interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)
-                       evo_data(push, 0x00001001);
-               else
-                       evo_data(push, 0x00000001);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 5)))
+               return ret;
+
+       PUSH_MTHD(push, NVC37E, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
+                                                    interlock[NV50_DISP_INTERLOCK_CORE],
+                               SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+       PUSH_MTHD(push, NVC37E, UPDATE, 0x00000001 |
+                 NVVAL(NVC37E, UPDATE, INTERLOCK_WITH_WIN_IMM,
+                         !!(interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)));
+
+       return PUSH_KICK(push);
 }
 
 void
index 1d64741..429be0b 100644
 #include <nouveau_bo.h>
 
 #include <nvif/clc37e.h>
+#include <nvif/pushc37b.h>
 
-static void
+#include <nvhw/class/clc57e.h>
+
+static int
 wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-
-       if (!(push = evo_wait(&wndw->wndw, 17)))
-               return;
-
-       evo_mthd(push, 0x0308, 1);
-       evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
-       evo_mthd(push, 0x0224, 4);
-       evo_data(push, asyw->image.h << 16 | asyw->image.w);
-       evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
-       evo_data(push, asyw->image.colorspace << 8 |
-                      asyw->image.format);
-       evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
-       evo_mthd(push, 0x0240, 1);
-       evo_data(push, asyw->image.handle[0]);
-       evo_mthd(push, 0x0260, 1);
-       evo_data(push, asyw->image.offset[0] >> 8);
-       evo_mthd(push, 0x0290, 1);
-       evo_data(push, (asyw->state.src_y >> 16) << 16 |
-                      (asyw->state.src_x >> 16));
-       evo_mthd(push, 0x0298, 1);
-       evo_data(push, (asyw->state.src_h >> 16) << 16 |
-                      (asyw->state.src_w >> 16));
-       evo_mthd(push, 0x02a4, 1);
-       evo_data(push, asyw->state.crtc_h << 16 |
-                      asyw->state.crtc_w);
-       evo_kick(push, &wndw->wndw);
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 17)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE,
+                 NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SET_STORAGE,
+                 NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+                 NVVAL(NVC57E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+                               SET_PARAMS,
+                 NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+                               SET_PLANAR_STORAGE(0),
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+       PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+       PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+                 NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+                 NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+                 NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+                 NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+                 NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+                 NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+       return 0;
 }
 
-static void
+static int
 wndwc57e_csc_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-                evo_mthd(push, 0x0400, 12);
-                evo_data(push, 0x00010000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00010000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00000000);
-                evo_data(push, 0x00010000);
-                evo_data(push, 0x00000000);
-                evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       const u32 identity[12] = {
+               0x00010000, 0x00000000, 0x00000000, 0x00000000,
+               0x00000000, 0x00010000, 0x00000000, 0x00000000,
+               0x00000000, 0x00000000, 0x00010000, 0x00000000,
+       };
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 1 + ARRAY_SIZE(identity))))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, identity, ARRAY_SIZE(identity));
+       return 0;
 }
 
-static void
+static int
 wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push, i;
-       if ((push = evo_wait(&wndw->wndw, 13))) {
-                evo_mthd(push, 0x0400, 12);
-                for (i = 0; i < 12; i++)
-                         evo_data(push, asyw->csc.matrix[i]);
-                evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 13)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, asyw->csc.matrix, 12);
+       return 0;
 }
 
-static void
+static int
 wndwc57e_ilut_clr(struct nv50_wndw *wndw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 2))) {
-               evo_mthd(push, 0x0444, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 2)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ILUT, 0x00000000);
+       return 0;
 }
 
-static void
+static int
 wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-       u32 *push;
-       if ((push = evo_wait(&wndw->wndw, 4))) {
-               evo_mthd(push, 0x0440, 3);
-               evo_data(push, asyw->xlut.i.size << 8 |
-                              asyw->xlut.i.mode << 2 |
-                              asyw->xlut.i.output_mode);
-               evo_data(push, asyw->xlut.handle);
-               evo_data(push, asyw->xlut.i.offset >> 8);
-               evo_kick(push, &wndw->wndw);
-       }
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 4)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_ILUT_CONTROL,
+                 NVVAL(NVC57E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+                 NVVAL(NVC57E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+                 NVVAL(NVC57E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode),
+
+                               SET_CONTEXT_DMA_ILUT, asyw->xlut.handle,
+                               SET_OFFSET_ILUT, asyw->xlut.i.offset >> 8);
+       return 0;
 }
 
 static u16
@@ -162,13 +185,13 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
        if (size = size ? size : 1024, size != 256 && size != 1024)
                return false;
 
-       if (size == 256) {
-               asyw->xlut.i.mode = 1; /* DIRECT8. */
-       } else {
-               asyw->xlut.i.mode = 2; /* DIRECT10. */
-       }
+       if (size == 256)
+               asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8;
+       else
+               asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10;
+
        asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
-       asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+       asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE;
        asyw->xlut.i.load = wndwc57e_ilut_load;
        return true;
 }
index 452ed7d..64a51e7 100644
@@ -25,7 +25,7 @@ struct nv_pmu_args {
 #define NV_PMU_UNIT_ACR                                                    0x0a
 
 struct nv_pmu_init_msg {
-       struct nv_falcon_msg hdr;
+       struct nvfw_falcon_msg hdr;
 #define NV_PMU_INIT_MSG_INIT                                               0x00
        u8 msg_type;
 
@@ -44,7 +44,7 @@ struct nv_pmu_init_msg {
 };
 
 struct nv_pmu_acr_cmd {
-       struct nv_falcon_cmd hdr;
+       struct nvfw_falcon_cmd hdr;
 #define NV_PMU_ACR_CMD_INIT_WPR_REGION                                     0x00
 #define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON                                    0x01
 #define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS                          0x03
@@ -52,7 +52,7 @@ struct nv_pmu_acr_cmd {
 };
 
 struct nv_pmu_acr_msg {
-       struct nv_falcon_cmd hdr;
+       struct nvfw_falcon_cmd hdr;
        u8 msg_type;
 };
 
index 0349655..9a37ad4 100644
@@ -13,7 +13,7 @@ struct nv_sec2_args {
 #define NV_SEC2_UNIT_ACR                                                   0x08
 
 struct nv_sec2_init_msg {
-       struct nv_falcon_msg hdr;
+       struct nvfw_falcon_msg hdr;
 #define NV_SEC2_INIT_MSG_INIT                                              0x00
        u8 msg_type;
 
@@ -34,13 +34,13 @@ struct nv_sec2_init_msg {
 };
 
 struct nv_sec2_acr_cmd {
-       struct nv_falcon_cmd hdr;
+       struct nvfw_falcon_cmd hdr;
 #define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON                                   0x00
        u8 cmd_type;
 };
 
 struct nv_sec2_acr_msg {
-       struct nv_falcon_cmd hdr;
+       struct nvfw_falcon_cmd hdr;
        u8 msg_type;
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h
new file mode 100644
index 0000000..5386ed6
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl0039_h_
+#define _cl0039_h_
+
+/* dma method offsets, fields, and values */
+#define NV039_SET_OBJECT                                           (0x00000000)
+#define NV039_NO_OPERATION                                         (0x00000100)
+#define NV039_SET_CONTEXT_DMA_NOTIFIES                             (0x00000180)
+#define NV039_SET_CONTEXT_DMA_BUFFER_IN                            (0x00000184)
+#define NV039_SET_CONTEXT_DMA_BUFFER_OUT                           (0x00000188)
+
+#define NV039_OFFSET_IN                                            (0x0000030C)
+#define NV039_OFFSET_OUT                                           (0x00000310)
+#define NV039_PITCH_IN                                             (0x00000314)
+#define NV039_PITCH_OUT                                            (0x00000318)
+#define NV039_LINE_LENGTH_IN                                       (0x0000031C)
+#define NV039_LINE_COUNT                                           (0x00000320)
+#define NV039_FORMAT                                               (0x00000324)
+#define NV039_FORMAT_IN                                            7:0
+#define NV039_FORMAT_OUT                                           31:8
+#define NV039_BUFFER_NOTIFY                                        (0x00000328)
+#define NV039_BUFFER_NOTIFY_WRITE_ONLY                             (0x00000000)
+#define NV039_BUFFER_NOTIFY_WRITE_THEN_AWAKEN                      (0x00000001)
+#endif /* _cl0039_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h
new file mode 100644
index 0000000..9ab2a22
--- /dev/null
@@ -0,0 +1,46 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl006c_h_
+#define _cl006c_h_
+
+/* fields and values */
+#define NV06C_PUT                                                  (0x00000040)
+#define NV06C_PUT_PTR                                              31:2
+#define NV06C_GET                                                  (0x00000044)
+#define NV06C_GET_PTR                                              31:2
+
+/* dma method descriptor format */
+#define NV06C_METHOD_ADDRESS                                       12:2
+#define NV06C_METHOD_SUBCHANNEL                                    15:13
+#define NV06C_METHOD_COUNT                                         28:18
+#define NV06C_OPCODE                                               31:29
+#define NV06C_OPCODE_METHOD                                        (0x00000000)
+#define NV06C_OPCODE_NONINC_METHOD                                 (0x00000002)
+
+/* dma data format */
+#define NV06C_DATA                                                 31:0
+
+/* dma jump format */
+#define NV06C_OPCODE_JUMP                                          (0x00000001)
+#define NV06C_JUMP_OFFSET                                          28:2
+#endif /* _cl006c_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h
new file mode 100644
index 0000000..8cfb596
--- /dev/null
@@ -0,0 +1,30 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl006e_h_
+#define _cl006e_h_
+
+/* fields and values */
+#define NV06E_SET_OBJECT                                           (0x00000000)
+#define NV06E_REFERENCE                                            (0x00000048)
+#define NV06E_SET_REFERENCE                                        (0x00000050)
+#endif /* _cl006e_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h
new file mode 100644
index 0000000..fa09725
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _cl176e_h_
+#define _cl176e_h_
+
+#define NV176E_SET_OBJECT                                          (0x00000000)
+#define NV176E_SET_CONTEXT_DMA_SEMAPHORE                           (0x00000060)
+#define NV176E_SEMAPHORE_OFFSET                                    (0x00000064)
+#define NV176E_SEMAPHORE_ACQUIRE                                   (0x00000068)
+#define NV176E_SEMAPHORE_RELEASE                                   (0x0000006c)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h
new file mode 100644
index 0000000..27313c7
--- /dev/null
@@ -0,0 +1,35 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl206e_h_
+#define _cl206e_h_
+
+/* dma opcode2 format */
+#define NV206E_DMA_OPCODE2                                         1:0
+#define NV206E_DMA_OPCODE2_NONE                                    (0x00000000)
+/* dma jump_long format */
+#define NV206E_DMA_OPCODE2_JUMP_LONG                               (0x00000001)
+#define NV206E_DMA_JUMP_LONG_OFFSET                                31:2
+/* dma call format */
+#define NV206E_DMA_OPCODE2_CALL                                    (0x00000002)
+#define NV206E_DMA_CALL_OFFSET                                     31:2
+#endif /* _cl206e_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h
new file mode 100644
index 0000000..47fe91b
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2003 - 2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_nv50_twod_h_
+#define _cl_nv50_twod_h_
+
+#define NV502D_SET_OBJECT                                                                                  0x0000
+#define NV502D_SET_OBJECT_POINTER                                                                            15:0
+
+#define NV502D_WAIT_FOR_IDLE                                                                               0x0110
+#define NV502D_WAIT_FOR_IDLE_V                                                                               31:0
+
+#define NV502D_SET_DST_CONTEXT_DMA                                                                         0x0184
+#define NV502D_SET_DST_CONTEXT_DMA_HANDLE                                                                    31:0
+
+#define NV502D_SET_SRC_CONTEXT_DMA                                                                         0x0188
+#define NV502D_SET_SRC_CONTEXT_DMA_HANDLE                                                                    31:0
+
+#define NV502D_SET_SEMAPHORE_CONTEXT_DMA                                                                   0x018c
+#define NV502D_SET_SEMAPHORE_CONTEXT_DMA_HANDLE                                                              31:0
+
+#define NV502D_SET_DST_FORMAT                                                                              0x0200
+#define NV502D_SET_DST_FORMAT_V                                                                               7:0
+#define NV502D_SET_DST_FORMAT_V_A8R8G8B8                                                               0x000000CF
+#define NV502D_SET_DST_FORMAT_V_A8RL8GL8BL8                                                            0x000000D0
+#define NV502D_SET_DST_FORMAT_V_A2R10G10B10                                                            0x000000DF
+#define NV502D_SET_DST_FORMAT_V_A8B8G8R8                                                               0x000000D5
+#define NV502D_SET_DST_FORMAT_V_A8BL8GL8RL8                                                            0x000000D6
+#define NV502D_SET_DST_FORMAT_V_A2B10G10R10                                                            0x000000D1
+#define NV502D_SET_DST_FORMAT_V_X8R8G8B8                                                               0x000000E6
+#define NV502D_SET_DST_FORMAT_V_X8RL8GL8BL8                                                            0x000000E7
+#define NV502D_SET_DST_FORMAT_V_X8B8G8R8                                                               0x000000F9
+#define NV502D_SET_DST_FORMAT_V_X8BL8GL8RL8                                                            0x000000FA
+#define NV502D_SET_DST_FORMAT_V_R5G6B5                                                                 0x000000E8
+#define NV502D_SET_DST_FORMAT_V_A1R5G5B5                                                               0x000000E9
+#define NV502D_SET_DST_FORMAT_V_X1R5G5B5                                                               0x000000F8
+#define NV502D_SET_DST_FORMAT_V_Y8                                                                     0x000000F3
+#define NV502D_SET_DST_FORMAT_V_Y16                                                                    0x000000EE
+#define NV502D_SET_DST_FORMAT_V_Y32                                                                    0x000000FF
+#define NV502D_SET_DST_FORMAT_V_Z1R5G5B5                                                               0x000000FB
+#define NV502D_SET_DST_FORMAT_V_O1R5G5B5                                                               0x000000FC
+#define NV502D_SET_DST_FORMAT_V_Z8R8G8B8                                                               0x000000FD
+#define NV502D_SET_DST_FORMAT_V_O8R8G8B8                                                               0x000000FE
+#define NV502D_SET_DST_FORMAT_V_Y1_8X8                                                                 0x0000001C
+#define NV502D_SET_DST_FORMAT_V_RF16                                                                   0x000000F2
+#define NV502D_SET_DST_FORMAT_V_RF32                                                                   0x000000E5
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32                                                              0x000000CB
+#define NV502D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16                                                    0x000000CA
+#define NV502D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16                                                     0x000000CE
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32                                                    0x000000C0
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32                                                     0x000000C3
+
+#define NV502D_SET_DST_MEMORY_LAYOUT                                                                       0x0204
+#define NV502D_SET_DST_MEMORY_LAYOUT_V                                                                        0:0
+#define NV502D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV502D_SET_DST_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV502D_SET_DST_PITCH                                                                               0x0214
+#define NV502D_SET_DST_PITCH_V                                                                               31:0
+
+#define NV502D_SET_DST_WIDTH                                                                               0x0218
+#define NV502D_SET_DST_WIDTH_V                                                                               31:0
+
+#define NV502D_SET_DST_HEIGHT                                                                              0x021c
+#define NV502D_SET_DST_HEIGHT_V                                                                              31:0
+
+#define NV502D_SET_DST_OFFSET_UPPER                                                                        0x0220
+#define NV502D_SET_DST_OFFSET_UPPER_V                                                                         7:0
+
+#define NV502D_SET_DST_OFFSET_LOWER                                                                        0x0224
+#define NV502D_SET_DST_OFFSET_LOWER_V                                                                        31:0
+
+#define NV502D_SET_SRC_FORMAT                                                                              0x0230
+#define NV502D_SET_SRC_FORMAT_V                                                                               7:0
+#define NV502D_SET_SRC_FORMAT_V_A8R8G8B8                                                               0x000000CF
+#define NV502D_SET_SRC_FORMAT_V_A8RL8GL8BL8                                                            0x000000D0
+#define NV502D_SET_SRC_FORMAT_V_A2R10G10B10                                                            0x000000DF
+#define NV502D_SET_SRC_FORMAT_V_A8B8G8R8                                                               0x000000D5
+#define NV502D_SET_SRC_FORMAT_V_A8BL8GL8RL8                                                            0x000000D6
+#define NV502D_SET_SRC_FORMAT_V_A2B10G10R10                                                            0x000000D1
+#define NV502D_SET_SRC_FORMAT_V_X8R8G8B8                                                               0x000000E6
+#define NV502D_SET_SRC_FORMAT_V_X8RL8GL8BL8                                                            0x000000E7
+#define NV502D_SET_SRC_FORMAT_V_X8B8G8R8                                                               0x000000F9
+#define NV502D_SET_SRC_FORMAT_V_X8BL8GL8RL8                                                            0x000000FA
+#define NV502D_SET_SRC_FORMAT_V_R5G6B5                                                                 0x000000E8
+#define NV502D_SET_SRC_FORMAT_V_A1R5G5B5                                                               0x000000E9
+#define NV502D_SET_SRC_FORMAT_V_X1R5G5B5                                                               0x000000F8
+#define NV502D_SET_SRC_FORMAT_V_Y8                                                                     0x000000F3
+#define NV502D_SET_SRC_FORMAT_V_AY8                                                                    0x0000001D
+#define NV502D_SET_SRC_FORMAT_V_Y16                                                                    0x000000EE
+#define NV502D_SET_SRC_FORMAT_V_Y32                                                                    0x000000FF
+#define NV502D_SET_SRC_FORMAT_V_Z1R5G5B5                                                               0x000000FB
+#define NV502D_SET_SRC_FORMAT_V_O1R5G5B5                                                               0x000000FC
+#define NV502D_SET_SRC_FORMAT_V_Z8R8G8B8                                                               0x000000FD
+#define NV502D_SET_SRC_FORMAT_V_O8R8G8B8                                                               0x000000FE
+#define NV502D_SET_SRC_FORMAT_V_Y1_8X8                                                                 0x0000001C
+#define NV502D_SET_SRC_FORMAT_V_RF16                                                                   0x000000F2
+#define NV502D_SET_SRC_FORMAT_V_RF32                                                                   0x000000E5
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32                                                              0x000000CB
+#define NV502D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16                                                    0x000000CA
+#define NV502D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16                                                     0x000000CE
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32                                                    0x000000C0
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32                                                     0x000000C3
+
+#define NV502D_SET_SRC_MEMORY_LAYOUT                                                                       0x0234
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V                                                                        0:0
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV502D_SET_SRC_PITCH                                                                               0x0244
+#define NV502D_SET_SRC_PITCH_V                                                                               31:0
+
+#define NV502D_SET_SRC_WIDTH                                                                               0x0248
+#define NV502D_SET_SRC_WIDTH_V                                                                               31:0
+
+#define NV502D_SET_SRC_HEIGHT                                                                              0x024c
+#define NV502D_SET_SRC_HEIGHT_V                                                                              31:0
+
+#define NV502D_SET_SRC_OFFSET_UPPER                                                                        0x0250
+#define NV502D_SET_SRC_OFFSET_UPPER_V                                                                         7:0
+
+#define NV502D_SET_SRC_OFFSET_LOWER                                                                        0x0254
+#define NV502D_SET_SRC_OFFSET_LOWER_V                                                                        31:0
+
+#define NV502D_SET_CLIP_ENABLE                                                                             0x0290
+#define NV502D_SET_CLIP_ENABLE_V                                                                              0:0
+#define NV502D_SET_CLIP_ENABLE_V_FALSE                                                                 0x00000000
+#define NV502D_SET_CLIP_ENABLE_V_TRUE                                                                  0x00000001
+
+#define NV502D_SET_ROP                                                                                     0x02a0
+#define NV502D_SET_ROP_V                                                                                      7:0
+
+#define NV502D_SET_OPERATION                                                                               0x02ac
+#define NV502D_SET_OPERATION_V                                                                                2:0
+#define NV502D_SET_OPERATION_V_SRCCOPY_AND                                                             0x00000000
+#define NV502D_SET_OPERATION_V_ROP_AND                                                                 0x00000001
+#define NV502D_SET_OPERATION_V_BLEND_AND                                                               0x00000002
+#define NV502D_SET_OPERATION_V_SRCCOPY                                                                 0x00000003
+#define NV502D_SET_OPERATION_V_ROP                                                                     0x00000004
+#define NV502D_SET_OPERATION_V_SRCCOPY_PREMULT                                                         0x00000005
+#define NV502D_SET_OPERATION_V_BLEND_PREMULT                                                           0x00000006
+
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT                                                         0x02e8
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V                                                          2:0
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5                                        0x00000000
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5                                          0x00000001
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8                                          0x00000002
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8                                              0x00000003
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16                                           0x00000004
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32                                               0x00000005
+
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT                                                               0x02ec
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V                                                                0:0
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1                                                 0x00000000
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1                                                   0x00000001
+
+#define NV502D_RENDER_SOLID_PRIM_MODE                                                                      0x0580
+#define NV502D_RENDER_SOLID_PRIM_MODE_V                                                                       2:0
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_POINTS                                                         0x00000000
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_LINES                                                          0x00000001
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_POLYLINE                                                       0x00000002
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES                                                      0x00000003
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_RECTS                                                          0x00000004
+
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT                                                          0x0584
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V                                                           7:0
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8                                           0x000000CF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10                                        0x000000DF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8                                           0x000000D5
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10                                        0x000000D1
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8                                           0x000000E6
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8                                           0x000000F9
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5                                             0x000000E8
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5                                           0x000000E9
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5                                           0x000000F8
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8                                                 0x000000F3
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16                                                0x000000EE
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32                                                0x000000FF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5                                           0x000000FB
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5                                           0x000000FC
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8                                           0x000000FD
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8                                           0x000000FE
+
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR                                                                 0x0588
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_V                                                                 31:0
+
+#define NV502D_RENDER_SOLID_PRIM_POINT_SET_X(j)                                                    (0x0600+(j)*8)
+#define NV502D_RENDER_SOLID_PRIM_POINT_SET_X_V                                                               31:0
+
+#define NV502D_RENDER_SOLID_PRIM_POINT_Y(j)                                                        (0x0604+(j)*8)
+#define NV502D_RENDER_SOLID_PRIM_POINT_Y_V                                                                   31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE                                                               0x0800
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V                                                                0:0
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR                                                   0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX                                                   0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT                                                            0x0804
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V                                                             7:0
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8                                             0x000000CF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10                                          0x000000DF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8                                             0x000000D5
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10                                          0x000000D1
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8                                             0x000000E6
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8                                             0x000000F9
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5                                               0x000000E8
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5                                             0x000000E9
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5                                             0x000000F8
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8                                                   0x000000F3
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16                                                  0x000000EE
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32                                                  0x000000FF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5                                             0x000000FB
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5                                             0x000000FC
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8                                             0x000000FD
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8                                             0x000000FE
+
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT                                                            0x0808
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V                                                             1:0
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1                                                   0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4                                                   0x00000001
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8                                                   0x00000002
+
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT                                                             0x080c
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V                                                              0:0
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1                                               0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1                                                 0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP                                                                    0x0810
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V                                                                     1:0
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL                                                   0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE                                                    0x00000001
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD                                                   0x00000002
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR0                                                                  0x0814
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR0_V                                                                  31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR1                                                                  0x0818
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR1_V                                                                  31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY                                                            0x081c
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V                                                             0:0
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT                                          0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE                                               0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_WIDTH                                                               0x0838
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V                                                               31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_HEIGHT                                                              0x083c
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V                                                              31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_FRAC                                                              0x0840
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V                                                              31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_INT                                                               0x0844
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_INT_V                                                               31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_FRAC                                                              0x0848
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V                                                              31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_INT                                                               0x084c
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_INT_V                                                               31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_FRAC                                                             0x0850
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V                                                             31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_INT                                                              0x0854
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_INT_V                                                              31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC                                                             0x0858
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V                                                             31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_INT                                                              0x085c
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V                                                              31:0
+
+#define NV502D_PIXELS_FROM_CPU_DATA                                                                        0x0860
+#define NV502D_PIXELS_FROM_CPU_DATA_V                                                                        31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP                                                         0x0888
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V                                                          0:0
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE                                             0x00000000
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE                                              0x00000001
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_X0                                                               0x08b0
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_X0_V                                                               31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_Y0                                                               0x08b4
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_Y0_V                                                               31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_WIDTH                                                            0x08b8
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V                                                            31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT                                                           0x08bc
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V                                                           31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC                                                           0x08c0
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V                                                           31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_INT                                                            0x08c4
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V                                                            31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC                                                           0x08c8
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V                                                           31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_INT                                                            0x08cc
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V                                                            31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC                                                          0x08d0
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V                                                          31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT                                                           0x08d4
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V                                                           31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC                                                          0x08d8
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V                                                          31:0
+
+#define NV502D_PIXELS_FROM_MEMORY_SRC_Y0_INT                                                               0x08dc
+#define NV502D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V                                                               31:0
+#endif /* _cl_nv50_twod_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h
new file mode 100644
index 0000000..5b2ca33
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2003-2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_nv50_memory_to_memory_format_h_
+#define _cl_nv50_memory_to_memory_format_h_
+
+#define NV5039_SET_OBJECT                                                                                  0x0000
+#define NV5039_SET_OBJECT_POINTER                                                                            15:0
+
+#define NV5039_NO_OPERATION                                                                                0x0100
+#define NV5039_NO_OPERATION_V                                                                                31:0
+
+#define NV5039_SET_CONTEXT_DMA_NOTIFY                                                                      0x0180
+#define NV5039_SET_CONTEXT_DMA_NOTIFY_HANDLE                                                                 31:0
+
+#define NV5039_SET_CONTEXT_DMA_BUFFER_IN                                                                   0x0184
+#define NV5039_SET_CONTEXT_DMA_BUFFER_IN_HANDLE                                                              31:0
+
+#define NV5039_SET_CONTEXT_DMA_BUFFER_OUT                                                                  0x0188
+#define NV5039_SET_CONTEXT_DMA_BUFFER_OUT_HANDLE                                                             31:0
+
+#define NV5039_SET_SRC_MEMORY_LAYOUT                                                                       0x0200
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V                                                                        0:0
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV5039_SET_SRC_BLOCK_SIZE                                                                          0x0204
+#define NV5039_SET_SRC_BLOCK_SIZE_WIDTH                                                                       3:0
+#define NV5039_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                                                        0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT                                                                      7:4
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                                                       0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                                                      0x00000001
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                                                     0x00000002
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                                                    0x00000003
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                                                  0x00000004
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                                                0x00000005
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH                                                                      11:8
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                                                        0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                                                       0x00000001
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                                                      0x00000002
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                                                     0x00000003
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                                                   0x00000004
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                                                 0x00000005
+
+#define NV5039_SET_SRC_WIDTH                                                                               0x0208
+#define NV5039_SET_SRC_WIDTH_V                                                                               31:0
+
+#define NV5039_SET_SRC_HEIGHT                                                                              0x020c
+#define NV5039_SET_SRC_HEIGHT_V                                                                              31:0
+
+#define NV5039_SET_SRC_DEPTH                                                                               0x0210
+#define NV5039_SET_SRC_DEPTH_V                                                                               31:0
+
+#define NV5039_SET_SRC_LAYER                                                                               0x0214
+#define NV5039_SET_SRC_LAYER_V                                                                               31:0
+
+#define NV5039_SET_SRC_ORIGIN                                                                              0x0218
+#define NV5039_SET_SRC_ORIGIN_X                                                                              15:0
+#define NV5039_SET_SRC_ORIGIN_Y                                                                             31:16
+
+#define NV5039_SET_DST_MEMORY_LAYOUT                                                                       0x021c
+#define NV5039_SET_DST_MEMORY_LAYOUT_V                                                                        0:0
+#define NV5039_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV5039_SET_DST_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV5039_SET_DST_BLOCK_SIZE                                                                          0x0220
+#define NV5039_SET_DST_BLOCK_SIZE_WIDTH                                                                       3:0
+#define NV5039_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                                                        0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT                                                                      7:4
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                                                       0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                                                      0x00000001
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                                                     0x00000002
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                                                    0x00000003
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                                                  0x00000004
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                                                0x00000005
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH                                                                      11:8
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                                                        0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                                                       0x00000001
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                                                      0x00000002
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                                                     0x00000003
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                                                   0x00000004
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                                                 0x00000005
+
+#define NV5039_SET_DST_WIDTH                                                                               0x0224
+#define NV5039_SET_DST_WIDTH_V                                                                               31:0
+
+#define NV5039_SET_DST_HEIGHT                                                                              0x0228
+#define NV5039_SET_DST_HEIGHT_V                                                                              31:0
+
+#define NV5039_SET_DST_DEPTH                                                                               0x022c
+#define NV5039_SET_DST_DEPTH_V                                                                               31:0
+
+#define NV5039_SET_DST_LAYER                                                                               0x0230
+#define NV5039_SET_DST_LAYER_V                                                                               31:0
+
+#define NV5039_SET_DST_ORIGIN                                                                              0x0234
+#define NV5039_SET_DST_ORIGIN_X                                                                              15:0
+#define NV5039_SET_DST_ORIGIN_Y                                                                             31:16
+
+#define NV5039_OFFSET_IN_UPPER                                                                             0x0238
+#define NV5039_OFFSET_IN_UPPER_VALUE                                                                          7:0
+
+#define NV5039_OFFSET_OUT_UPPER                                                                            0x023c
+#define NV5039_OFFSET_OUT_UPPER_VALUE                                                                         7:0
+
+#define NV5039_OFFSET_IN                                                                                   0x030c
+#define NV5039_OFFSET_IN_VALUE                                                                               31:0
+
+#define NV5039_OFFSET_OUT                                                                                  0x0310
+#define NV5039_OFFSET_OUT_VALUE                                                                              31:0
+
+#define NV5039_PITCH_IN                                                                                    0x0314
+#define NV5039_PITCH_IN_VALUE                                                                                31:0
+
+#define NV5039_PITCH_OUT                                                                                   0x0318
+#define NV5039_PITCH_OUT_VALUE                                                                               31:0
+
+#define NV5039_LINE_LENGTH_IN                                                                              0x031c
+#define NV5039_LINE_LENGTH_IN_VALUE                                                                          31:0
+
+#define NV5039_LINE_COUNT                                                                                  0x0320
+#define NV5039_LINE_COUNT_VALUE                                                                              31:0
+
+#define NV5039_FORMAT                                                                                      0x0324
+#define NV5039_FORMAT_IN                                                                                      7:0
+#define NV5039_FORMAT_IN_ONE                                                                           0x00000001
+#define NV5039_FORMAT_OUT                                                                                    15:8
+#define NV5039_FORMAT_OUT_ONE                                                                          0x00000001
+
+#define NV5039_BUFFER_NOTIFY                                                                               0x0328
+#define NV5039_BUFFER_NOTIFY_TYPE                                                                            31:0
+#define NV5039_BUFFER_NOTIFY_TYPE_WRITE_ONLY                                                           0x00000000
+#define NV5039_BUFFER_NOTIFY_TYPE_WRITE_THEN_AWAKEN                                                    0x00000001
+#endif /* _cl_nv50_memory_to_memory_format_h_ */
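
Note: field positions in these headers are written as HIGH:LOW (for example NV5039_SET_SRC_ORIGIN_X is 15:0 and NV5039_SET_SRC_ORIGIN_Y is 31:16). That notation is only usable through ternary-based helper macros; this series carries nouveau's own helpers for it in nvhw/drf.h, so the FIELD_* names below are just an illustrative, self-contained sketch of the trick rather than the kernel's API:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helpers (not nouveau's API): a "HIGH:LOW" define expands to
 * a ternary tail, so (1 ? 15:0) evaluates to 15 and (0 ? 15:0) to 0. */
#define FIELD_HI(f)     (1 ? f)
#define FIELD_LO(f)     (0 ? f)
#define FIELD_MASK(f)   ((~0u >> (31 - FIELD_HI(f))) & (~0u << FIELD_LO(f)))
#define FIELD_VAL(f, v) (((uint32_t)(v) << FIELD_LO(f)) & FIELD_MASK(f))

/* Copied from cl5039.h above so the sketch is self-contained. */
#define NV5039_SET_SRC_ORIGIN_X 15:0
#define NV5039_SET_SRC_ORIGIN_Y 31:16

int main(void)
{
	/* Pack a (64, 32) source origin into the 32-bit SET_SRC_ORIGIN payload. */
	uint32_t v = FIELD_VAL(NV5039_SET_SRC_ORIGIN_X, 64) |
		     FIELD_VAL(NV5039_SET_SRC_ORIGIN_Y, 32);
	printf("SET_SRC_ORIGIN payload: 0x%08x\n", v); /* 0x00200040 */
	return 0;
}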
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h
new file mode 100644
index 0000000..a97bcec
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507a_h_
+#define _cl507a_h_
+
+#define NV507A_FREE                                                             (0x00000008)
+#define NV507A_FREE_COUNT                                                       5:0
+#define NV507A_UPDATE                                                           (0x00000080)
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE                                       0:0
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE_DISABLE                               (0x00000000)
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE_ENABLE                                (0x00000001)
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT                                    (0x00000084)
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT_X                                  15:0
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y                                  31:16
+#endif // _cl507a_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h
new file mode 100644
index 0000000..ada1701
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507c_h_
+#define _cl507c_h_
+
+#define NV_DISP_BASE_NOTIFIER_1                                                      0x00000000
+#define NV_DISP_BASE_NOTIFIER_1_SIZEOF                                               0x00000004
+#define NV_DISP_BASE_NOTIFIER_1__0                                                   0x00000000
+#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT                                15:0
+#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP                                         29:16
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS                                            31:30
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN                                  0x00000000
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN                                      0x00000001
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED                                   0x00000002
+
+
+// dma opcode instructions
+#define NV507C_DMA                                     0x00000000
+#define NV507C_DMA_OPCODE                                   31:29
+#define NV507C_DMA_OPCODE_METHOD                       0x00000000
+#define NV507C_DMA_OPCODE_JUMP                         0x00000001
+#define NV507C_DMA_OPCODE_NONINC_METHOD                0x00000002
+#define NV507C_DMA_OPCODE_SET_SUBDEVICE_MASK           0x00000003
+#define NV507C_DMA_OPCODE                                   31:29
+#define NV507C_DMA_OPCODE_METHOD                       0x00000000
+#define NV507C_DMA_OPCODE_NONINC_METHOD                0x00000002
+#define NV507C_DMA_METHOD_COUNT                             27:18
+#define NV507C_DMA_METHOD_OFFSET                             11:2
+#define NV507C_DMA_DATA                                      31:0
+#define NV507C_DMA_NOP                                 0x00000000
+#define NV507C_DMA_OPCODE                                   31:29
+#define NV507C_DMA_OPCODE_JUMP                         0x00000001
+#define NV507C_DMA_JUMP_OFFSET                               11:2
+#define NV507C_DMA_OPCODE                                   31:29
+#define NV507C_DMA_OPCODE_SET_SUBDEVICE_MASK           0x00000003
+#define NV507C_DMA_SET_SUBDEVICE_MASK_VALUE                  11:0
+
+// class methods
+#define NV507C_PUT                                                              (0x00000000)
+#define NV507C_PUT_PTR                                                          11:2
+#define NV507C_GET                                                              (0x00000004)
+#define NV507C_GET_PTR                                                          11:2
+#define NV507C_UPDATE                                                           (0x00000080)
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE                                       0:0
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE_DISABLE                               (0x00000000)
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE_ENABLE                                (0x00000001)
+#define NV507C_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE                                   9:8
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING                       (0x00000000)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE                         (0x00000001)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE                           (0x00000002)
+#define NV507C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_LINE                                   30:16
+#define NV507C_SET_PRESENT_CONTROL_ON_LINE_MARGIN                               15:10
+#define NV507C_SET_SEMAPHORE_CONTROL                                            (0x00000088)
+#define NV507C_SET_SEMAPHORE_CONTROL_OFFSET                                     11:2
+#define NV507C_SET_SEMAPHORE_ACQUIRE                                            (0x0000008C)
+#define NV507C_SET_SEMAPHORE_ACQUIRE_VALUE                                      31:0
+#define NV507C_SET_SEMAPHORE_RELEASE                                            (0x00000090)
+#define NV507C_SET_SEMAPHORE_RELEASE_VALUE                                      31:0
+#define NV507C_SET_CONTEXT_DMA_SEMAPHORE                                        (0x00000094)
+#define NV507C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE                                 31:0
+#define NV507C_SET_NOTIFIER_CONTROL                                             (0x000000A0)
+#define NV507C_SET_NOTIFIER_CONTROL_MODE                                        30:30
+#define NV507C_SET_NOTIFIER_CONTROL_MODE_WRITE                                  (0x00000000)
+#define NV507C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN                           (0x00000001)
+#define NV507C_SET_NOTIFIER_CONTROL_OFFSET                                      11:2
+#define NV507C_SET_CONTEXT_DMA_NOTIFIER                                         (0x000000A4)
+#define NV507C_SET_CONTEXT_DMA_NOTIFIER_HANDLE                                  31:0
+#define NV507C_SET_CONTEXT_DMA_ISO                                              (0x000000C0)
+#define NV507C_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NV507C_SET_BASE_LUT_LO                                                  (0x000000E0)
+#define NV507C_SET_BASE_LUT_LO_ENABLE                                           31:30
+#define NV507C_SET_BASE_LUT_LO_ENABLE_DISABLE                                   (0x00000000)
+#define NV507C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT                              (0x00000001)
+#define NV507C_SET_BASE_LUT_LO_ENABLE_ENABLE                                    (0x00000003)
+#define NV507C_SET_BASE_LUT_LO_MODE                                             29:29
+#define NV507C_SET_BASE_LUT_LO_MODE_LORES                                       (0x00000000)
+#define NV507C_SET_BASE_LUT_LO_MODE_HIRES                                       (0x00000001)
+#define NV507C_SET_BASE_LUT_LO_ORIGIN                                           7:2
+#define NV507C_SET_PROCESSING                                                   (0x00000110)
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS                                      0:0
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS_DISABLE                              (0x00000000)
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS_ENABLE                               (0x00000001)
+#define NV507C_SET_CONVERSION                                                   (0x00000114)
+#define NV507C_SET_CONVERSION_GAIN                                              15:0
+#define NV507C_SET_CONVERSION_OFS                                               31:16
+
+#define NV507C_SURFACE_SET_OFFSET(a,b)                                          (0x00000800 + (a)*0x00000020 + (b)*0x00000004)
+#define NV507C_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV507C_SURFACE_SET_SIZE(a)                                              (0x00000808 + (a)*0x00000020)
+#define NV507C_SURFACE_SET_SIZE_WIDTH                                           14:0
+#define NV507C_SURFACE_SET_SIZE_HEIGHT                                          30:16
+#define NV507C_SURFACE_SET_STORAGE(a)                                           (0x0000080C + (a)*0x00000020)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV507C_SURFACE_SET_STORAGE_PITCH                                        17:8
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                20:20
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS(a)                                            (0x00000810 + (a)*0x00000020)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_I8                                     (0x0000001E)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_VOID16                                 (0x0000001F)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_VOID32                                 (0x0000002E)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                    (0x000000CA)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                            (0x000000D1)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8                               (0x000000D5)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5                                 (0x000000E8)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE                                  1:0
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA                            (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA                            (0x00000002)
+#define NV507C_SURFACE_SET_PARAMS_GAMMA                                         2:2
+#define NV507C_SURFACE_SET_PARAMS_GAMMA_LINEAR                                  (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_GAMMA_SRGB                                    (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT                                        5:4
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FRM                                    (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FLD1                                   (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FLD2                                   (0x00000002)
+#define NV507C_SURFACE_SET_PARAMS_KIND                                          22:16
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_PITCH                               (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2                        (0x00000070)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ               (0x00000072)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1                       (0x00000074)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ              (0x00000076)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4                             (0x00000078)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8                             (0x00000079)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ                    (0x0000007A)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ                    (0x0000007B)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C64_MS4                             (0x0000007C)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C64_MS8                             (0x0000007D)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C128_MS4                            (0x0000007E)
+#define NV507C_SURFACE_SET_PARAMS_KIND_FROM_PTE                                 (0x0000007F)
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE                                   24:24
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256                    (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024                   (0x00000001)
+#endif // _cl507c_h
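
Note: the NV507C_DMA_* block near the top of this file describes the channel's push-buffer command words: OPCODE lives in bits 31:29, METHOD_COUNT in 27:18, METHOD_OFFSET in 11:2, and the following DATA words are full 32-bit payloads. A minimal sketch, using a hypothetical encode_method() helper (not nouveau's API), of how one such header word could be assembled from those fields:

#include <stdint.h>
#include <stdio.h>

/* Field positions copied from the NV507C_DMA_* defines above. */
#define DMA_OPCODE_SHIFT        29   /* NV507C_DMA_OPCODE        31:29 */
#define DMA_METHOD_COUNT_SHIFT  18   /* NV507C_DMA_METHOD_COUNT  27:18 */
#define DMA_METHOD_OFFSET_SHIFT  2   /* NV507C_DMA_METHOD_OFFSET 11:2  */
#define DMA_OPCODE_METHOD      0x0   /* NV507C_DMA_OPCODE_METHOD        */

/* Hypothetical helper: build the header word that precedes `count` data
 * words targeting the method at byte offset `mthd`. */
static uint32_t encode_method(uint32_t mthd, uint32_t count)
{
	return ((uint32_t)DMA_OPCODE_METHOD << DMA_OPCODE_SHIFT) |
	       (count << DMA_METHOD_COUNT_SHIFT) |
	       ((mthd >> 2) << DMA_METHOD_OFFSET_SHIFT);
}

int main(void)
{
	/* One data word for NV507C_UPDATE (method offset 0x0080). */
	printf("0x%08x\n", encode_method(0x0080, 1)); /* 0x00040080 */
	return 0;
}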
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
new file mode 100644
index 0000000..2e444ba
--- /dev/null
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507d_h_
+#define _cl507d_h_
+
+#define NV_DISP_CORE_NOTIFIER_1                                                      0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_SIZEOF                                               0x00000054
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0                                         0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE                                    0:0
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_FALSE                              0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE                               0x00000001
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0                                      15:1
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP                               29:16
+
+
+// class methods
+#define NV507D_UPDATE                                                           (0x00000080)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0                                    0:0
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE                            (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE                             (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1                                    8:8
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE                            (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE                             (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0                                      1:1
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE                              (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE                               (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1                                      9:9
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE                              (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE                               (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0                                   2:2
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE                           (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE                            (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1                                   10:10
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE                           (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE                            (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0                               3:3
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE                       (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE                        (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1                               11:11
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE                       (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE                        (0x00000001)
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY                                       31:31
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE                                 (0x00000000)
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE                                  (0x00000001)
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY                                     30:30
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE                               (0x00000000)
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE                                (0x00000001)
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS                                        29:29
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS_FALSE                                  (0x00000000)
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS_TRUE                                   (0x00000001)
+#define NV507D_SET_NOTIFIER_CONTROL                                             (0x00000084)
+#define NV507D_SET_NOTIFIER_CONTROL_MODE                                        30:30
+#define NV507D_SET_NOTIFIER_CONTROL_MODE_WRITE                                  (0x00000000)
+#define NV507D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN                           (0x00000001)
+#define NV507D_SET_NOTIFIER_CONTROL_OFFSET                                      11:2
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY                                      31:31
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE                              (0x00000000)
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE                               (0x00000001)
+#define NV507D_SET_CONTEXT_DMA_NOTIFIER                                         (0x00000088)
+#define NV507D_SET_CONTEXT_DMA_NOTIFIER_HANDLE                                  31:0
+#define NV507D_GET_CAPABILITIES                                                 (0x0000008C)
+#define NV507D_GET_CAPABILITIES_DUMMY                                           31:0
+
+#define NV507D_DAC_SET_CONTROL(a)                                               (0x00000400 + (a)*0x00000080)
+#define NV507D_DAC_SET_CONTROL_OWNER                                            3:0
+#define NV507D_DAC_SET_CONTROL_OWNER_NONE                                       (0x00000000)
+#define NV507D_DAC_SET_CONTROL_OWNER_HEAD0                                      (0x00000001)
+#define NV507D_DAC_SET_CONTROL_OWNER_HEAD1                                      (0x00000002)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER                                        5:4
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_NONE                                   (0x00000000)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_SUBHEAD0                               (0x00000001)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_SUBHEAD1                               (0x00000002)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_BOTH                                   (0x00000003)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL                                         13:8
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT                                 (0x00000000)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_NTSC_M                             (0x00000001)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_NTSC_J                             (0x00000002)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_BDGHI                          (0x00000003)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_M                              (0x00000004)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_N                              (0x00000005)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_CN                             (0x00000006)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_NTSC_M                             (0x00000007)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_NTSC_J                             (0x00000008)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_BDGHI                          (0x00000009)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_M                              (0x0000000A)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_N                              (0x0000000B)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_CN                             (0x0000000C)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_480P_60                            (0x0000000D)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_576P_50                            (0x0000000E)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_720P_50                            (0x0000000F)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_720P_60                            (0x00000010)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_1080I_50                           (0x00000011)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_1080I_60                           (0x00000012)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000003F)
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD                           14:14
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD_FALSE                     (0x00000000)
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD_TRUE                      (0x00000001)
+#define NV507D_DAC_SET_POLARITY(a)                                              (0x00000404 + (a)*0x00000080)
+#define NV507D_DAC_SET_POLARITY_HSYNC                                           0:0
+#define NV507D_DAC_SET_POLARITY_HSYNC_POSITIVE_TRUE                             (0x00000000)
+#define NV507D_DAC_SET_POLARITY_HSYNC_NEGATIVE_TRUE                             (0x00000001)
+#define NV507D_DAC_SET_POLARITY_VSYNC                                           1:1
+#define NV507D_DAC_SET_POLARITY_VSYNC_POSITIVE_TRUE                             (0x00000000)
+#define NV507D_DAC_SET_POLARITY_VSYNC_NEGATIVE_TRUE                             (0x00000001)
+#define NV507D_DAC_SET_POLARITY_RESERVED                                        31:2
+
+#define NV507D_SOR_SET_CONTROL(a)                                               (0x00000600 + (a)*0x00000040)
+#define NV507D_SOR_SET_CONTROL_OWNER                                            3:0
+#define NV507D_SOR_SET_CONTROL_OWNER_NONE                                       (0x00000000)
+#define NV507D_SOR_SET_CONTROL_OWNER_HEAD0                                      (0x00000001)
+#define NV507D_SOR_SET_CONTROL_OWNER_HEAD1                                      (0x00000002)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER                                        5:4
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_NONE                                   (0x00000000)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0                               (0x00000001)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1                               (0x00000002)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_BOTH                                   (0x00000003)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB                          (0x00000003)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS                        (0x00000004)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT                                 (0x00000007)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY                                   12:12
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY                                   13:13
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 14:14
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+
+#define NV507D_PIOR_SET_CONTROL(a)                                              (0x00000700 + (a)*0x00000040)
+#define NV507D_PIOR_SET_CONTROL_OWNER                                           3:0
+#define NV507D_PIOR_SET_CONTROL_OWNER_NONE                                      (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_OWNER_HEAD0                                     (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_OWNER_HEAD1                                     (0x00000002)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER                                       5:4
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_NONE                                  (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD0                              (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD1                              (0x00000002)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_BOTH                                  (0x00000003)
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL                                        11:8
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC                           (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC                             (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY                                  12:12
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE                    (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE                    (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY                                  13:13
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE                    (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE                    (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY                                14:14
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                  (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                  (0x00000001)
+
+#define NV507D_HEAD_SET_PIXEL_CLOCK(a)                                          (0x00000804 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_FREQUENCY                                   21:0
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE                                        23:22
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_25                                 (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_28                                 (0x00000001)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_CUSTOM                             (0x00000002)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001                              24:24
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001_FALSE                        (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001_TRUE                         (0x00000001)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER                                  25:25
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER_FALSE                            (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER_TRUE                             (0x00000001)
+#define NV507D_HEAD_SET_CONTROL(a)                                              (0x00000808 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE                                       2:1
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE                           (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED                            (0x00000001)
+#define NV507D_HEAD_SET_OVERSCAN_COLOR(a)                                       (0x00000810 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_RED                                      9:0
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_GRN                                      19:10
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_BLU                                      29:20
+#define NV507D_HEAD_SET_RASTER_SIZE(a)                                          (0x00000814 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_SIZE_WIDTH                                       14:0
+#define NV507D_HEAD_SET_RASTER_SIZE_HEIGHT                                      30:16
+#define NV507D_HEAD_SET_RASTER_SYNC_END(a)                                      (0x00000818 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_SYNC_END_X                                       14:0
+#define NV507D_HEAD_SET_RASTER_SYNC_END_Y                                       30:16
+#define NV507D_HEAD_SET_RASTER_BLANK_END(a)                                     (0x0000081C + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_BLANK_END_X                                      14:0
+#define NV507D_HEAD_SET_RASTER_BLANK_END_Y                                      30:16
+#define NV507D_HEAD_SET_RASTER_BLANK_START(a)                                   (0x00000820 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_BLANK_START_X                                    14:0
+#define NV507D_HEAD_SET_RASTER_BLANK_START_Y                                    30:16
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2(a)                                   (0x00000824 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2_YSTART                               14:0
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2_YEND                                 30:16
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK_DMI(a)                                (0x00000828 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK_DMI_DURATION                          11:0
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR(a)                                   (0x0000082C + (a)*0x00000400)
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_RED                                  9:0
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN                                19:10
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE                                 29:20
+#define NV507D_HEAD_SET_BASE_LUT_LO(a)                                          (0x00000840 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE                                      31:31
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE                              (0x00000000)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE                               (0x00000001)
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE                                        30:30
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES                                  (0x00000000)
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES                                  (0x00000001)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ORIGIN                                      7:2
+#define NV507D_HEAD_SET_BASE_LUT_HI(a)                                          (0x00000844 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_LUT_HI_ORIGIN                                      31:0
+#define NV507D_HEAD_SET_OFFSET(a,b)                                             (0x00000860 + (a)*0x00000400 + (b)*0x00000004)
+#define NV507D_HEAD_SET_OFFSET_ORIGIN                                           31:0
+#define NV507D_HEAD_SET_SIZE(a)                                                 (0x00000868 + (a)*0x00000400)
+#define NV507D_HEAD_SET_SIZE_WIDTH                                              14:0
+#define NV507D_HEAD_SET_SIZE_HEIGHT                                             30:16
+#define NV507D_HEAD_SET_STORAGE(a)                                              (0x0000086C + (a)*0x00000400)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT                                    3:0
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                            (0x00000000)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                           (0x00000001)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                          (0x00000002)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                         (0x00000003)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                       (0x00000004)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                     (0x00000005)
+#define NV507D_HEAD_SET_STORAGE_PITCH                                           17:8
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT                                   20:20
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                       (0x00000000)
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH                             (0x00000001)
+#define NV507D_HEAD_SET_PARAMS(a)                                               (0x00000870 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PARAMS_FORMAT                                           15:8
+#define NV507D_HEAD_SET_PARAMS_FORMAT_I8                                        (0x0000001E)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_VOID16                                    (0x0000001F)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_VOID32                                    (0x0000002E)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                       (0x000000CA)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8                                  (0x000000CF)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10                               (0x000000D1)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8                                  (0x000000D5)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_R5G6B5                                    (0x000000E8)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5                                  (0x000000E9)
+#define NV507D_HEAD_SET_PARAMS_KIND                                             22:16
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH                                  (0x00000000)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_8BX2                           (0x00000070)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ                  (0x00000072)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_16BX1                          (0x00000074)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ                 (0x00000076)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS4                                (0x00000078)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS8                                (0x00000079)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ                       (0x0000007A)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ                       (0x0000007B)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C64_MS4                                (0x0000007C)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C64_MS8                                (0x0000007D)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C128_MS4                               (0x0000007E)
+#define NV507D_HEAD_SET_PARAMS_KIND_FROM_PTE                                    (0x0000007F)
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE                                      24:24
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256                       (0x00000000)
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024                      (0x00000001)
+#define NV507D_HEAD_SET_CONTEXT_DMA_ISO(a)                                      (0x00000874 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTEXT_DMA_ISO_HANDLE                                  31:0
+#define NV507D_HEAD_SET_CONTROL_CURSOR(a)                                       (0x00000880 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE                                   31:31
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE                           (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE                            (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT                                   25:24
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5                          (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8                          (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE                                     26:26
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32                             (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64                             (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X                               13:8
+#define NV507D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y                               21:16
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION                              29:28
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND                  (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND          (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR                          (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER                                5:4
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_NONE                           (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD0                       (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD1                       (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_BOTH                           (0x00000003)
+#define NV507D_HEAD_SET_OFFSET_CURSOR(a)                                        (0x00000884 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OFFSET_CURSOR_ORIGIN                                    31:0
+#define NV507D_HEAD_SET_DITHER_CONTROL(a)                                       (0x000008A0 + (a)*0x00000400)
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE                                   0:0
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE                           (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE                            (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS                                     2:1
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS                    (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS                    (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE                                     6:3
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC                     (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC                      (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2                         (0x00000002)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2                          (0x00000003)
+#define NV507D_HEAD_SET_DITHER_CONTROL_PHASE                                    8:7
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER(a)                                (0x000008A4 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS                     2:0
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1              (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2              (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3              (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE     (0x00000003)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5              (0x00000004)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS                   4:3
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1            (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2            (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8            (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS                    23:16
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS                    31:24
+#define NV507D_HEAD_SET_PROCAMP(a)                                              (0x000008A8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE                                     1:0
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB                                 (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601                             (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709                             (0x00000002)
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF                                      2:2
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO                                 (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF_ON                                   (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_SAT_COS                                         19:8
+#define NV507D_HEAD_SET_PROCAMP_SAT_SINE                                        31:20
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION                                      4:3
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_HARD                                 (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_NTSC                                 (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_PAL                                  (0x00000002)
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN(a,b)                                  (0x000008C0 + (a)*0x00000400 + (b)*0x00000004)
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN_X                                     14:0
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN_Y                                     30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN(a)                                     (0x000008C8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH                                  14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT                                 30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT(a)                                    (0x000008D8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH                                 14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT                                30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a)                                (0x000008DC + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH                             14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT                            30:16
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a)                            (0x00000900 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE                        0:0
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE                  (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE                   (0x00000001)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH                   11:8
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8             (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16            (0x00000001)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32            (0x00000003)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64            (0x00000005)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE                  13:12
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA            (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA            (0x00000002)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a)                                 (0x00000904 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE                             0:0
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE                       (0x00000000)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE                        (0x00000001)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH                        11:8
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16                 (0x00000001)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32                 (0x00000003)
+#endif // _cl507d_h
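
The definitions above follow NVIDIA's usual class-header convention: a method name expands to its offset in the channel's method space (per-head methods via the 0x400 stride), a field name expands to a HIGH:LOW bit range within the 32-bit data word, and the further-suffixed names are the legal values for that field. As a minimal, self-contained sketch of how such a word is assembled, the snippet below packs a HEAD_SET_CONTROL_CURSOR value by hand; the FLD_MASK/FLD_VAL helpers are invented for this example and are not taken from the kernel.

#include <stdint.h>
#include <stdio.h>

/* Example-only helpers: turn a HIGH:LOW bit range into a mask and shift a
 * value into place within a 32-bit method data word. */
#define FLD_MASK(hi, lo)    ((uint32_t)((~0u >> (31 - (hi))) & (~0u << (lo))))
#define FLD_VAL(hi, lo, v)  (((uint32_t)(v) << (lo)) & FLD_MASK(hi, lo))

int main(void)
{
	/* NV507D_HEAD_SET_CONTROL_CURSOR: ENABLE is 31:31, FORMAT is 25:24,
	 * SIZE is 26:26, COMPOSITION is 29:28 (ranges/values from cl507d.h). */
	uint32_t ctrl = 0;

	ctrl |= FLD_VAL(31, 31, 0x1);	/* ENABLE_ENABLE */
	ctrl |= FLD_VAL(25, 24, 0x1);	/* FORMAT_A8R8G8B8 */
	ctrl |= FLD_VAL(26, 26, 0x1);	/* SIZE_W64_H64 */
	ctrl |= FLD_VAL(29, 28, 0x0);	/* COMPOSITION_ALPHA_BLEND */

	/* The word would be written to method 0x880 + head * 0x400. */
	printf("HEAD_SET_CONTROL_CURSOR(0) = 0x%08x\n", ctrl);
	return 0;
}
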
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h
new file mode 100644

index 0000000..1f432b4
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507e_h_
+#define _cl507e_h_
+
+// class methods
+#define NV507E_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE                                   1:0
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP                              (0x00000000)
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP                         (0x00000003)
+#define NV507E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV507E_SET_CONTEXT_DMA_ISO                                              (0x000000C0)
+#define NV507E_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NV507E_SET_POINT_IN                                                     (0x000000E0)
+#define NV507E_SET_POINT_IN_X                                                   14:0
+#define NV507E_SET_POINT_IN_Y                                                   30:16
+#define NV507E_SET_SIZE_IN                                                      (0x000000E4)
+#define NV507E_SET_SIZE_IN_WIDTH                                                14:0
+#define NV507E_SET_SIZE_IN_HEIGHT                                               30:16
+#define NV507E_SET_SIZE_OUT                                                     (0x000000E8)
+#define NV507E_SET_SIZE_OUT_WIDTH                                               14:0
+#define NV507E_SET_COMPOSITION_CONTROL                                          (0x00000100)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE                                     3:0
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING           (0x00000000)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING      (0x00000001)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_OPAQUE_SUSPEND_BASE                 (0x00000002)
+
+#define NV507E_SURFACE_SET_OFFSET                                               (0x00000800)
+#define NV507E_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV507E_SURFACE_SET_SIZE                                                 (0x00000808)
+#define NV507E_SURFACE_SET_SIZE_WIDTH                                           14:0
+#define NV507E_SURFACE_SET_SIZE_HEIGHT                                          30:16
+#define NV507E_SURFACE_SET_STORAGE                                              (0x0000080C)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV507E_SURFACE_SET_STORAGE_PITCH                                        17:8
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                20:20
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV507E_SURFACE_SET_PARAMS                                               (0x00000810)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8                           (0x00000028)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8                           (0x00000029)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE                                   1:0
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB                               (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601                           (0x00000001)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709                           (0x00000002)
+#define NV507E_SURFACE_SET_PARAMS_KIND                                          22:16
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_PITCH                               (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2                        (0x00000070)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ               (0x00000072)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1                       (0x00000074)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ              (0x00000076)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4                             (0x00000078)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8                             (0x00000079)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ                    (0x0000007A)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ                    (0x0000007B)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C64_MS4                             (0x0000007C)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C64_MS8                             (0x0000007D)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C128_MS4                            (0x0000007E)
+#define NV507E_SURFACE_SET_PARAMS_KIND_FROM_PTE                                 (0x0000007F)
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE                                   24:24
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256                    (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024                   (0x00000001)
+#endif // _cl507e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h
new file mode 100644
index 0000000..8e7c0fb
--- /dev/null
@@ -0,0 +1,39 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl826f_h_
+#define _cl826f_h_
+
+#define NV826F_SEMAPHOREA                                          (0x00000010)
+#define NV826F_SEMAPHOREA_OFFSET_UPPER                                     7:0
+#define NV826F_SEMAPHOREB                                          (0x00000014)
+#define NV826F_SEMAPHOREB_OFFSET_LOWER                                   31:00
+#define NV826F_SEMAPHOREC                                          (0x00000018)
+#define NV826F_SEMAPHOREC_PAYLOAD                                         31:0
+#define NV826F_SEMAPHORED                                          (0x0000001C)
+#define NV826F_SEMAPHORED_OPERATION                                        2:0
+#define NV826F_SEMAPHORED_OPERATION_ACQUIRE                         0x00000001
+#define NV826F_SEMAPHORED_OPERATION_RELEASE                         0x00000002
+#define NV826F_SEMAPHORED_OPERATION_ACQ_GEQ                         0x00000004
+#define NV826F_NON_STALLED_INTERRUPT                               (0x00000020)
+#define NV826F_SET_CONTEXT_DMA_SEMAPHORE                           (0x00000060)
+#endif /* _cl826f_h_ */
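
NV826F_SEMAPHOREA through NV826F_SEMAPHORED form one method group: A takes the upper bits of the semaphore address (OFFSET_UPPER 7:0), B the lower 32 bits, C the payload, and D the operation to perform. Below is a hedged sketch of staging a RELEASE into a software copy of a push buffer; push_method() and the flat (method, data) layout are stand-ins for illustration, not the nouveau push-buffer API.

#include <stdint.h>
#include <stdio.h>

/* Example-only helper: append one (method, data) pair to a software copy of
 * a push buffer.  A stand-in for illustration, not the nouveau push API. */
static void push_method(uint32_t **p, uint32_t mthd, uint32_t data)
{
	*(*p)++ = mthd;
	*(*p)++ = data;
}

int main(void)
{
	uint32_t pb[8], *p = pb;
	uint64_t addr = 0xAB00001000ull;	/* example 40-bit semaphore address */

	/* Semaphore RELEASE with payload 1, using the NV826F method offsets
	 * from cl826f.h above (hard-coded so the sketch stays standalone). */
	push_method(&p, 0x0010, (uint32_t)(addr >> 32)); /* SEMAPHOREA: OFFSET_UPPER */
	push_method(&p, 0x0014, (uint32_t)addr);         /* SEMAPHOREB: OFFSET_LOWER */
	push_method(&p, 0x0018, 0x00000001);             /* SEMAPHOREC: PAYLOAD */
	push_method(&p, 0x001C, 0x00000002);             /* SEMAPHORED: OPERATION_RELEASE */

	for (unsigned i = 0; i < 8; i += 2)
		printf("mthd 0x%04x <- 0x%08x\n", pb[i], pb[i + 1]);
	return 0;
}
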
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h
new file mode 100644
index 0000000..4b8938e
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827c_h_
+#define _cl827c_h_
+
+// class methods
+#define NV827C_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE                                   9:8
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING                       (0x00000000)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE                         (0x00000001)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE                           (0x00000002)
+#define NV827C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_LINE                                   30:16
+#define NV827C_SET_PRESENT_CONTROL_ON_LINE_MARGIN                               15:10
+#define NV827C_SET_CONTEXT_DMAS_ISO(b)                                          (0x000000C0 + (b)*0x00000004)
+#define NV827C_SET_CONTEXT_DMAS_ISO_HANDLE                                      31:0
+#define NV827C_SET_PROCESSING                                                   (0x00000110)
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS                                      0:0
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS_DISABLE                              (0x00000000)
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS_ENABLE                               (0x00000001)
+#define NV827C_SET_CONVERSION                                                   (0x00000114)
+#define NV827C_SET_CONVERSION_GAIN                                              15:0
+#define NV827C_SET_CONVERSION_OFS                                               31:16
+
+#define NV827C_SURFACE_SET_OFFSET(a,b)                                          (0x00000800 + (a)*0x00000020 + (b)*0x00000004)
+#define NV827C_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV827C_SURFACE_SET_SIZE(a)                                              (0x00000808 + (a)*0x00000020)
+#define NV827C_SURFACE_SET_SIZE_WIDTH                                           14:0
+#define NV827C_SURFACE_SET_SIZE_HEIGHT                                          30:16
+#define NV827C_SURFACE_SET_STORAGE(a)                                           (0x0000080C + (a)*0x00000020)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV827C_SURFACE_SET_STORAGE_PITCH                                        17:8
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                20:20
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS(a)                                            (0x00000810 + (a)*0x00000020)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_I8                                     (0x0000001E)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_VOID16                                 (0x0000001F)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_VOID32                                 (0x0000002E)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                    (0x000000CA)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                            (0x000000D1)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8                               (0x000000D5)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_R5G6B5                                 (0x000000E8)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE                                  1:0
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA                            (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA                            (0x00000002)
+#define NV827C_SURFACE_SET_PARAMS_GAMMA                                         2:2
+#define NV827C_SURFACE_SET_PARAMS_GAMMA_LINEAR                                  (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_GAMMA_SRGB                                    (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT                                        5:4
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FRM                                    (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FLD1                                   (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FLD2                                   (0x00000002)
+#define NV827C_SURFACE_SET_PARAMS_RESERVED0                                     22:16
+#define NV827C_SURFACE_SET_PARAMS_RESERVED1                                     24:24
+#endif // _cl827c_h
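
One detail worth calling out in the SURFACE_SET_STORAGE fields: the BLOCK_HEIGHT encoding is simply log2 of the GOB count per block, ONE_GOB = 0 up to THIRTYTWO_GOBS = 5, so the value can be computed rather than table-looked-up. An illustrative helper follows (it makes no claim about the units of the separate PITCH field):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Map a block height in GOBs (1, 2, 4, ..., 32) to the *_BLOCK_HEIGHT_*_GOBS
 * encoding used by SURFACE_SET_STORAGE: the value is log2(gobs). */
static uint32_t block_height_enum(unsigned gobs)
{
	uint32_t v = 0;

	assert(gobs >= 1 && gobs <= 32 && (gobs & (gobs - 1)) == 0);
	while ((1u << v) < gobs)
		v++;
	return v;	/* 1 -> ONE_GOB (0) ... 32 -> THIRTYTWO_GOBS (5) */
}

int main(void)
{
	printf("4 GOBs  -> %u\n", block_height_enum(4));	/* FOUR_GOBS (2) */
	printf("16 GOBs -> %u\n", block_height_enum(16));	/* SIXTEEN_GOBS (4) */
	return 0;
}
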
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h
new file mode 100644
index 0000000..5da5d55
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827d_h_
+#define _cl827d_h_
+
+// class methods
+#define NV827D_HEAD_SET_BASE_LUT_LO(a)                                          (0x00000840 + (a)*0x00000400)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE                                      31:31
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE                              (0x00000000)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE                               (0x00000001)
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE                                        30:30
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE_LORES                                  (0x00000000)
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE_HIRES                                  (0x00000001)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ORIGIN                                      7:2
+#define NV827D_HEAD_SET_BASE_LUT_HI(a)                                          (0x00000844 + (a)*0x00000400)
+#define NV827D_HEAD_SET_BASE_LUT_HI_ORIGIN                                      31:0
+#define NV827D_HEAD_SET_CONTEXT_DMA_LUT(a)                                      (0x0000085C + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE                                  31:0
+#define NV827D_HEAD_SET_OFFSET(a,b)                                             (0x00000860 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_OFFSET_ORIGIN                                           31:0
+#define NV827D_HEAD_SET_SIZE(a)                                                 (0x00000868 + (a)*0x00000400)
+#define NV827D_HEAD_SET_SIZE_WIDTH                                              14:0
+#define NV827D_HEAD_SET_SIZE_HEIGHT                                             30:16
+#define NV827D_HEAD_SET_STORAGE(a)                                              (0x0000086C + (a)*0x00000400)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT                                    3:0
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                            (0x00000000)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                           (0x00000001)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                          (0x00000002)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                         (0x00000003)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                       (0x00000004)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                     (0x00000005)
+#define NV827D_HEAD_SET_STORAGE_PITCH                                           17:8
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT                                   20:20
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                       (0x00000000)
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH                             (0x00000001)
+#define NV827D_HEAD_SET_PARAMS(a)                                               (0x00000870 + (a)*0x00000400)
+#define NV827D_HEAD_SET_PARAMS_FORMAT                                           15:8
+#define NV827D_HEAD_SET_PARAMS_FORMAT_I8                                        (0x0000001E)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_VOID16                                    (0x0000001F)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_VOID32                                    (0x0000002E)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                       (0x000000CA)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8                                  (0x000000CF)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10                               (0x000000D1)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8                                  (0x000000D5)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_R5G6B5                                    (0x000000E8)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5                                  (0x000000E9)
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE                                     1:0
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA                               (0x00000000)
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA                               (0x00000002)
+#define NV827D_HEAD_SET_PARAMS_GAMMA                                            2:2
+#define NV827D_HEAD_SET_PARAMS_GAMMA_LINEAR                                     (0x00000000)
+#define NV827D_HEAD_SET_PARAMS_GAMMA_SRGB                                       (0x00000001)
+#define NV827D_HEAD_SET_PARAMS_RESERVED0                                        22:16
+#define NV827D_HEAD_SET_PARAMS_RESERVED1                                        24:24
+#define NV827D_HEAD_SET_CONTEXT_DMAS_ISO(a,b)                                   (0x00000874 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE                                 31:0
+#define NV827D_HEAD_SET_CONTROL_CURSOR(a)                                       (0x00000880 + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE                                   31:31
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE                           (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE                            (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT                                   25:24
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5                          (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8                          (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE                                     26:26
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32                             (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64                             (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X                               13:8
+#define NV827D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y                               21:16
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION                              29:28
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND                  (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND          (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR                          (0x00000002)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER                                5:4
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_NONE                           (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD0                       (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD1                       (0x00000002)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_BOTH                           (0x00000003)
+#define NV827D_HEAD_SET_OFFSET_CURSOR(a)                                        (0x00000884 + (a)*0x00000400)
+#define NV827D_HEAD_SET_OFFSET_CURSOR_ORIGIN                                    31:0
+#define NV827D_HEAD_SET_CONTEXT_DMA_CURSOR(a)                                   (0x0000089C + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE                               31:0
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN(a,b)                                  (0x000008C0 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN_X                                     14:0
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN_Y                                     30:16
+#endif // _cl827d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h
new file mode 100644
index 0000000..8cae7a5
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827e_h_
+#define _cl827e_h_
+
+#define NV_DISP_NOTIFICATION_1                                                       0x00000000
+#define NV_DISP_NOTIFICATION_1_SIZEOF                                                0x00000010
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_0                                          0x00000000
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_0_NANOSECONDS0                             31:0
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_1                                          0x00000001
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_1_NANOSECONDS1                             31:0
+#define NV_DISP_NOTIFICATION_1__2                                                    0x00000002
+#define NV_DISP_NOTIFICATION_1__2_AUDIT_TIMESTAMP                                    31:0
+#define NV_DISP_NOTIFICATION_1__3                                                    0x00000003
+#define NV_DISP_NOTIFICATION_1__3_PRESENT_COUNT                                      7:0
+#define NV_DISP_NOTIFICATION_1__3_R0                                                 15:8
+#define NV_DISP_NOTIFICATION_1__3_STATUS                                             31:16
+#define NV_DISP_NOTIFICATION_1__3_STATUS_NOT_BEGUN                                   0x00008000
+#define NV_DISP_NOTIFICATION_1__3_STATUS_BEGUN                                       0x0000FFFF
+#define NV_DISP_NOTIFICATION_1__3_STATUS_FINISHED                                    0x00000000
+
+
+// class methods
+#define NV827E_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE                                   1:0
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP                              (0x00000000)
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP                         (0x00000003)
+#define NV827E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV827E_SET_CONTEXT_DMA_ISO                                              (0x000000C0)
+#define NV827E_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NV827E_SET_COMPOSITION_CONTROL                                          (0x00000100)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE                                     3:0
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING           (0x00000000)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING      (0x00000001)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_OPAQUE_SUSPEND_BASE                 (0x00000002)
+
+#define NV827E_SURFACE_SET_OFFSET                                               (0x00000800)
+#define NV827E_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV827E_SURFACE_SET_SIZE                                                 (0x00000808)
+#define NV827E_SURFACE_SET_SIZE_WIDTH                                           14:0
+#define NV827E_SURFACE_SET_SIZE_HEIGHT                                          30:16
+#define NV827E_SURFACE_SET_STORAGE                                              (0x0000080C)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV827E_SURFACE_SET_STORAGE_PITCH                                        17:8
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                20:20
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV827E_SURFACE_SET_PARAMS                                               (0x00000810)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8                           (0x00000028)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8                           (0x00000029)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                            (0x000000D1)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE                                   1:0
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB                               (0x00000000)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601                           (0x00000001)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709                           (0x00000002)
+#define NV827E_SURFACE_SET_PARAMS_RESERVED0                                     22:16
+#define NV827E_SURFACE_SET_PARAMS_RESERVED1                                     24:24
+#endif // _cl827e_h
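
Besides the channel methods, cl827e.h describes the in-memory NV_DISP_NOTIFICATION_1 record the display engine writes back: four 32-bit words, with word 3 carrying PRESENT_COUNT in bits 7:0 and STATUS in bits 31:16 (NOT_BEGUN, BEGUN, FINISHED). The snippet below is a hedged sketch of checking that status from a CPU mapping; the mapping, the helper name and the polling usage are assumptions of the example, not something the header prescribes.

#include <stdbool.h>
#include <stdint.h>

/* Word/field offsets from the NV_DISP_NOTIFICATION_1 layout in cl827e.h. */
#define NOTIF_WORD3		3
#define NOTIF_STATUS_SHIFT	16
#define NOTIF_STATUS_MASK	0xFFFF
#define NOTIF_STATUS_FINISHED	0x0000

/* Check whether the display engine has marked a CPU-visible notifier as
 * FINISHED.  'notif' points at the 16-byte record; how the caller waits
 * (polling, interrupt, fence) is outside the scope of this sketch. */
static bool notifier_finished(const volatile uint32_t *notif)
{
	uint32_t status = (notif[NOTIF_WORD3] >> NOTIF_STATUS_SHIFT) & NOTIF_STATUS_MASK;

	return status == NOTIF_STATUS_FINISHED;
}

int main(void)
{
	volatile uint32_t notif[4] = { 0, 0, 0, 0x00000000 };	/* word 3: STATUS_FINISHED */

	return notifier_finished(notif) ? 0 : 1;
}
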
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h
new file mode 100644
index 0000000..0db9d4e
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl837d_h_
+#define _cl837d_h_
+
+// class methods
+#define NV837D_SOR_SET_CONTROL(a)                                               (0x00000600 + (a)*0x00000040)
+#define NV837D_SOR_SET_CONTROL_OWNER                                            3:0
+#define NV837D_SOR_SET_CONTROL_OWNER_NONE                                       (0x00000000)
+#define NV837D_SOR_SET_CONTROL_OWNER_HEAD0                                      (0x00000001)
+#define NV837D_SOR_SET_CONTROL_OWNER_HEAD1                                      (0x00000002)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER                                        5:4
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_NONE                                   (0x00000000)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0                               (0x00000001)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1                               (0x00000002)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_BOTH                                   (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB                          (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS                        (0x00000004)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT                                 (0x00000007)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY                                   12:12
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY                                   13:13
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 14:14
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH                                      19:16
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT                              (0x00000000)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422                           (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444                           (0x00000002)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422                           (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422                           (0x00000004)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444                           (0x00000005)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444                           (0x00000006)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422                           (0x00000007)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444                           (0x00000008)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444                           (0x00000009)
+
+#define NV837D_PIOR_SET_CONTROL(a)                                              (0x00000700 + (a)*0x00000040)
+#define NV837D_PIOR_SET_CONTROL_OWNER                                           3:0
+#define NV837D_PIOR_SET_CONTROL_OWNER_NONE                                      (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_OWNER_HEAD0                                     (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_OWNER_HEAD1                                     (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER                                       5:4
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_NONE                                  (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD0                              (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD1                              (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_BOTH                                  (0x00000003)
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL                                        11:8
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC                           (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC                             (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY                                  12:12
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE                    (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE                    (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY                                  13:13
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE                    (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE                    (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY                                14:14
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                  (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                  (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH                                     19:16
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT                             (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422                          (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444                          (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422                          (0x00000003)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422                          (0x00000004)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444                          (0x00000005)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444                          (0x00000006)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422                          (0x00000007)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444                          (0x00000008)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444                          (0x00000009)
+#endif // _cl837d_h
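
NV837D extends the NV50-family SOR/PIOR control words with a PIXEL_DEPTH field in bits 19:16. As the read-back counterpart to the packing sketch shown earlier, the example below pulls OWNER, PROTOCOL and PIXEL_DEPTH back out of a control word; FLD_GET is example-only, not a kernel macro.

#include <stdint.h>
#include <stdio.h>

/* Example-only extractor for a HIGH:LOW field out of a 32-bit word. */
#define FLD_GET(w, hi, lo)  (((w) >> (lo)) & (~0u >> (31 - (hi) + (lo))))

int main(void)
{
	/* OWNER_HEAD0, PROTOCOL_SINGLE_TMDS_A, PIXEL_DEPTH_BPP_24_444,
	 * both sync polarities positive (ranges/values from cl837d.h above). */
	uint32_t ctrl = 0x00050101;

	printf("owner       = %u\n", (unsigned)FLD_GET(ctrl, 3, 0));   /* 1: HEAD0 */
	printf("protocol    = %u\n", (unsigned)FLD_GET(ctrl, 11, 8));  /* 1: SINGLE_TMDS_A */
	printf("pixel depth = %u\n", (unsigned)FLD_GET(ctrl, 19, 16)); /* 5: BPP_24_444 */
	return 0;
}
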
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h
new file mode 100644
index 0000000..c93efc6
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl887d_h_
+#define _cl887d_h_
+
+#define NV887D_SOR_SET_CONTROL(a)                                               (0x00000600 + (a)*0x00000040)
+#define NV887D_SOR_SET_CONTROL_OWNER                                            3:0
+#define NV887D_SOR_SET_CONTROL_OWNER_NONE                                       (0x00000000)
+#define NV887D_SOR_SET_CONTROL_OWNER_HEAD0                                      (0x00000001)
+#define NV887D_SOR_SET_CONTROL_OWNER_HEAD1                                      (0x00000002)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER                                        5:4
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_NONE                                   (0x00000000)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0                               (0x00000001)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1                               (0x00000002)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_BOTH                                   (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB                          (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS                        (0x00000004)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT                                 (0x00000007)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A                                    (0x00000008)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B                                    (0x00000009)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY                                   12:12
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY                                   13:13
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE                     (0x00000000)
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE                     (0x00000001)
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 14:14
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH                                      19:16
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT                              (0x00000000)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422                           (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444                           (0x00000002)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422                           (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422                           (0x00000004)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444                           (0x00000005)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444                           (0x00000006)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422                           (0x00000007)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444                           (0x00000008)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444                           (0x00000009)
+#endif // _cl887d_h
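
The cl837d/cl887d headers above follow NVIDIA's usual "high:low" bit-range convention: NV887D_SOR_SET_CONTROL_OWNER names bits 3:0 of the method data, PROTOCOL bits 11:8, and the *_OWNER_HEAD0 / *_PROTOCOL_SINGLE_TMDS_A style constants are the values placed in those fields. As a rough, non-authoritative sketch of how such a "hi:lo" token expands into a shift and a mask (the EX_* names below are hypothetical, invented purely for illustration; nouveau carries its own helpers for this):

	#include <linux/types.h>	/* u32, for this sketch only */

	/* "(0 ? 11:8)" evaluates to 8, "(1 ? 11:8)" to 11. */
	#define EX_FIELD_LOW(f)		(0 ? f)
	#define EX_FIELD_HIGH(f)	(1 ? f)
	#define EX_FIELD_MASK(f)	(((2ULL << EX_FIELD_HIGH(f)) - 1) & \
					 ~((1ULL << EX_FIELD_LOW(f)) - 1))
	#define EX_FIELD_VAL(f, v)	(((v) << EX_FIELD_LOW(f)) & EX_FIELD_MASK(f))

	/* Example: head 0 owns the SOR, single-link TMDS on sublink A. */
	static inline u32 ex_sor_control(void)
	{
		return EX_FIELD_VAL(NV887D_SOR_SET_CONTROL_OWNER,
				    NV887D_SOR_SET_CONTROL_OWNER_HEAD0) |
		       EX_FIELD_VAL(NV887D_SOR_SET_CONTROL_PROTOCOL,
				    NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A);
	}

The same pattern applies to every other "hi:lo" field in these class headers.
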
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h
new file mode 100644 (file)
index 0000000..8d0b42c
--- /dev/null
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2003 - 2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_fermi_twod_a_h_
+#define _cl_fermi_twod_a_h_
+
+#define NV902D_SET_OBJECT                                                                                  0x0000
+#define NV902D_SET_OBJECT_CLASS_ID                                                                           15:0
+#define NV902D_SET_OBJECT_ENGINE_ID                                                                         20:16
+
+#define NV902D_WAIT_FOR_IDLE                                                                               0x0110
+#define NV902D_WAIT_FOR_IDLE_V                                                                               31:0
+
+#define NV902D_SET_DST_FORMAT                                                                              0x0200
+#define NV902D_SET_DST_FORMAT_V                                                                               7:0
+#define NV902D_SET_DST_FORMAT_V_A8R8G8B8                                                               0x000000CF
+#define NV902D_SET_DST_FORMAT_V_A8RL8GL8BL8                                                            0x000000D0
+#define NV902D_SET_DST_FORMAT_V_A2R10G10B10                                                            0x000000DF
+#define NV902D_SET_DST_FORMAT_V_A8B8G8R8                                                               0x000000D5
+#define NV902D_SET_DST_FORMAT_V_A8BL8GL8RL8                                                            0x000000D6
+#define NV902D_SET_DST_FORMAT_V_A2B10G10R10                                                            0x000000D1
+#define NV902D_SET_DST_FORMAT_V_X8R8G8B8                                                               0x000000E6
+#define NV902D_SET_DST_FORMAT_V_X8RL8GL8BL8                                                            0x000000E7
+#define NV902D_SET_DST_FORMAT_V_X8B8G8R8                                                               0x000000F9
+#define NV902D_SET_DST_FORMAT_V_X8BL8GL8RL8                                                            0x000000FA
+#define NV902D_SET_DST_FORMAT_V_R5G6B5                                                                 0x000000E8
+#define NV902D_SET_DST_FORMAT_V_A1R5G5B5                                                               0x000000E9
+#define NV902D_SET_DST_FORMAT_V_X1R5G5B5                                                               0x000000F8
+#define NV902D_SET_DST_FORMAT_V_Y8                                                                     0x000000F3
+#define NV902D_SET_DST_FORMAT_V_Y16                                                                    0x000000EE
+#define NV902D_SET_DST_FORMAT_V_Y32                                                                    0x000000FF
+#define NV902D_SET_DST_FORMAT_V_Z1R5G5B5                                                               0x000000FB
+#define NV902D_SET_DST_FORMAT_V_O1R5G5B5                                                               0x000000FC
+#define NV902D_SET_DST_FORMAT_V_Z8R8G8B8                                                               0x000000FD
+#define NV902D_SET_DST_FORMAT_V_O8R8G8B8                                                               0x000000FE
+#define NV902D_SET_DST_FORMAT_V_Y1_8X8                                                                 0x0000001C
+#define NV902D_SET_DST_FORMAT_V_RF16                                                                   0x000000F2
+#define NV902D_SET_DST_FORMAT_V_RF32                                                                   0x000000E5
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32                                                              0x000000CB
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16                                                    0x000000CA
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16                                                     0x000000CE
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32                                                    0x000000C0
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32                                                     0x000000C3
+#define NV902D_SET_DST_FORMAT_V_R16_G16_B16_A16                                                        0x000000C6
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16_BN16_AN16                                                    0x000000C7
+#define NV902D_SET_DST_FORMAT_V_BF10GF11RF11                                                           0x000000E0
+#define NV902D_SET_DST_FORMAT_V_AN8BN8GN8RN8                                                           0x000000D7
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16                                                              0x000000DE
+#define NV902D_SET_DST_FORMAT_V_R16_G16                                                                0x000000DA
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16                                                              0x000000DB
+#define NV902D_SET_DST_FORMAT_V_G8R8                                                                   0x000000EA
+#define NV902D_SET_DST_FORMAT_V_GN8RN8                                                                 0x000000EB
+#define NV902D_SET_DST_FORMAT_V_RN16                                                                   0x000000EF
+#define NV902D_SET_DST_FORMAT_V_RN8                                                                    0x000000F4
+#define NV902D_SET_DST_FORMAT_V_A8                                                                     0x000000F7
+
+#define NV902D_SET_DST_MEMORY_LAYOUT                                                                       0x0204
+#define NV902D_SET_DST_MEMORY_LAYOUT_V                                                                        0:0
+#define NV902D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV902D_SET_DST_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV902D_SET_DST_PITCH                                                                               0x0214
+#define NV902D_SET_DST_PITCH_V                                                                               31:0
+
+#define NV902D_SET_DST_WIDTH                                                                               0x0218
+#define NV902D_SET_DST_WIDTH_V                                                                               31:0
+
+#define NV902D_SET_DST_HEIGHT                                                                              0x021c
+#define NV902D_SET_DST_HEIGHT_V                                                                              31:0
+
+#define NV902D_SET_DST_OFFSET_UPPER                                                                        0x0220
+#define NV902D_SET_DST_OFFSET_UPPER_V                                                                         7:0
+
+#define NV902D_SET_DST_OFFSET_LOWER                                                                        0x0224
+#define NV902D_SET_DST_OFFSET_LOWER_V                                                                        31:0
+
+#define NV902D_SET_SRC_FORMAT                                                                              0x0230
+#define NV902D_SET_SRC_FORMAT_V                                                                               7:0
+#define NV902D_SET_SRC_FORMAT_V_A8R8G8B8                                                               0x000000CF
+#define NV902D_SET_SRC_FORMAT_V_A8RL8GL8BL8                                                            0x000000D0
+#define NV902D_SET_SRC_FORMAT_V_A2R10G10B10                                                            0x000000DF
+#define NV902D_SET_SRC_FORMAT_V_A8B8G8R8                                                               0x000000D5
+#define NV902D_SET_SRC_FORMAT_V_A8BL8GL8RL8                                                            0x000000D6
+#define NV902D_SET_SRC_FORMAT_V_A2B10G10R10                                                            0x000000D1
+#define NV902D_SET_SRC_FORMAT_V_X8R8G8B8                                                               0x000000E6
+#define NV902D_SET_SRC_FORMAT_V_X8RL8GL8BL8                                                            0x000000E7
+#define NV902D_SET_SRC_FORMAT_V_X8B8G8R8                                                               0x000000F9
+#define NV902D_SET_SRC_FORMAT_V_X8BL8GL8RL8                                                            0x000000FA
+#define NV902D_SET_SRC_FORMAT_V_R5G6B5                                                                 0x000000E8
+#define NV902D_SET_SRC_FORMAT_V_A1R5G5B5                                                               0x000000E9
+#define NV902D_SET_SRC_FORMAT_V_X1R5G5B5                                                               0x000000F8
+#define NV902D_SET_SRC_FORMAT_V_Y8                                                                     0x000000F3
+#define NV902D_SET_SRC_FORMAT_V_AY8                                                                    0x0000001D
+#define NV902D_SET_SRC_FORMAT_V_Y16                                                                    0x000000EE
+#define NV902D_SET_SRC_FORMAT_V_Y32                                                                    0x000000FF
+#define NV902D_SET_SRC_FORMAT_V_Z1R5G5B5                                                               0x000000FB
+#define NV902D_SET_SRC_FORMAT_V_O1R5G5B5                                                               0x000000FC
+#define NV902D_SET_SRC_FORMAT_V_Z8R8G8B8                                                               0x000000FD
+#define NV902D_SET_SRC_FORMAT_V_O8R8G8B8                                                               0x000000FE
+#define NV902D_SET_SRC_FORMAT_V_Y1_8X8                                                                 0x0000001C
+#define NV902D_SET_SRC_FORMAT_V_RF16                                                                   0x000000F2
+#define NV902D_SET_SRC_FORMAT_V_RF32                                                                   0x000000E5
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32                                                              0x000000CB
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16                                                    0x000000CA
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16                                                     0x000000CE
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32                                                    0x000000C0
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32                                                     0x000000C3
+#define NV902D_SET_SRC_FORMAT_V_R16_G16_B16_A16                                                        0x000000C6
+#define NV902D_SET_SRC_FORMAT_V_RN16_GN16_BN16_AN16                                                    0x000000C7
+#define NV902D_SET_SRC_FORMAT_V_BF10GF11RF11                                                           0x000000E0
+#define NV902D_SET_SRC_FORMAT_V_AN8BN8GN8RN8                                                           0x000000D7
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16                                                              0x000000DE
+#define NV902D_SET_SRC_FORMAT_V_R16_G16                                                                0x000000DA
+#define NV902D_SET_SRC_FORMAT_V_RN16_GN16                                                              0x000000DB
+#define NV902D_SET_SRC_FORMAT_V_G8R8                                                                   0x000000EA
+#define NV902D_SET_SRC_FORMAT_V_GN8RN8                                                                 0x000000EB
+#define NV902D_SET_SRC_FORMAT_V_RN16                                                                   0x000000EF
+#define NV902D_SET_SRC_FORMAT_V_RN8                                                                    0x000000F4
+#define NV902D_SET_SRC_FORMAT_V_A8                                                                     0x000000F7
+
+#define NV902D_SET_SRC_MEMORY_LAYOUT                                                                       0x0234
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V                                                                        0:0
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR                                                     0x00000000
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V_PITCH                                                           0x00000001
+
+#define NV902D_SET_SRC_PITCH                                                                               0x0244
+#define NV902D_SET_SRC_PITCH_V                                                                               31:0
+
+#define NV902D_SET_SRC_WIDTH                                                                               0x0248
+#define NV902D_SET_SRC_WIDTH_V                                                                               31:0
+
+#define NV902D_SET_SRC_HEIGHT                                                                              0x024c
+#define NV902D_SET_SRC_HEIGHT_V                                                                              31:0
+
+#define NV902D_SET_SRC_OFFSET_UPPER                                                                        0x0250
+#define NV902D_SET_SRC_OFFSET_UPPER_V                                                                         7:0
+
+#define NV902D_SET_SRC_OFFSET_LOWER                                                                        0x0254
+#define NV902D_SET_SRC_OFFSET_LOWER_V                                                                        31:0
+
+#define NV902D_SET_CLIP_ENABLE                                                                             0x0290
+#define NV902D_SET_CLIP_ENABLE_V                                                                              0:0
+#define NV902D_SET_CLIP_ENABLE_V_FALSE                                                                 0x00000000
+#define NV902D_SET_CLIP_ENABLE_V_TRUE                                                                  0x00000001
+
+#define NV902D_SET_ROP                                                                                     0x02a0
+#define NV902D_SET_ROP_V                                                                                      7:0
+
+#define NV902D_SET_OPERATION                                                                               0x02ac
+#define NV902D_SET_OPERATION_V                                                                                2:0
+#define NV902D_SET_OPERATION_V_SRCCOPY_AND                                                             0x00000000
+#define NV902D_SET_OPERATION_V_ROP_AND                                                                 0x00000001
+#define NV902D_SET_OPERATION_V_BLEND_AND                                                               0x00000002
+#define NV902D_SET_OPERATION_V_SRCCOPY                                                                 0x00000003
+#define NV902D_SET_OPERATION_V_ROP                                                                     0x00000004
+#define NV902D_SET_OPERATION_V_SRCCOPY_PREMULT                                                         0x00000005
+#define NV902D_SET_OPERATION_V_BLEND_PREMULT                                                           0x00000006
+
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT                                                         0x02e8
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V                                                          2:0
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5                                        0x00000000
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5                                          0x00000001
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8                                          0x00000002
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8                                              0x00000003
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16                                           0x00000004
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32                                               0x00000005
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_BYTE_EXPAND                                       0x00000006
+
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT                                                               0x02ec
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V                                                                0:0
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1                                                 0x00000000
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1                                                   0x00000001
+
+#define NV902D_RENDER_SOLID_PRIM_MODE                                                                      0x0580
+#define NV902D_RENDER_SOLID_PRIM_MODE_V                                                                       2:0
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_POINTS                                                         0x00000000
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_LINES                                                          0x00000001
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_POLYLINE                                                       0x00000002
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES                                                      0x00000003
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_RECTS                                                          0x00000004
+
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT                                                          0x0584
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V                                                           7:0
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32_BF32_AF32                                0x000000C0
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF16_GF16_BF16_AF16                                0x000000CA
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32                                          0x000000CB
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8                                           0x000000CF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10                                        0x000000DF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8                                           0x000000D5
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10                                        0x000000D1
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8                                           0x000000E6
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8                                           0x000000F9
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5                                             0x000000E8
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5                                           0x000000E9
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5                                           0x000000F8
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8                                                 0x000000F3
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16                                                0x000000EE
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32                                                0x000000FF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5                                           0x000000FB
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5                                           0x000000FC
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8                                           0x000000FD
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8                                           0x000000FE
+
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR                                                                 0x0588
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_V                                                                 31:0
+
+#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X(j)                                                    (0x0600+(j)*8)
+#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X_V                                                               31:0
+
+#define NV902D_RENDER_SOLID_PRIM_POINT_Y(j)                                                        (0x0604+(j)*8)
+#define NV902D_RENDER_SOLID_PRIM_POINT_Y_V                                                                   31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE                                                               0x0800
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V                                                                0:0
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR                                                   0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX                                                   0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT                                                            0x0804
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V                                                             7:0
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8                                             0x000000CF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10                                          0x000000DF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8                                             0x000000D5
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10                                          0x000000D1
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8                                             0x000000E6
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8                                             0x000000F9
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5                                               0x000000E8
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5                                             0x000000E9
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5                                             0x000000F8
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8                                                   0x000000F3
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16                                                  0x000000EE
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32                                                  0x000000FF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5                                             0x000000FB
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5                                             0x000000FC
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8                                             0x000000FD
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8                                             0x000000FE
+
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT                                                            0x0808
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V                                                             1:0
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1                                                   0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4                                                   0x00000001
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8                                                   0x00000002
+
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT                                                             0x080c
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V                                                              0:0
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1                                               0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1                                                 0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP                                                                    0x0810
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V                                                                     1:0
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL                                                   0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE                                                    0x00000001
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD                                                   0x00000002
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR0                                                                  0x0814
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR0_V                                                                  31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR1                                                                  0x0818
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR1_V                                                                  31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY                                                            0x081c
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V                                                             0:0
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT                                          0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE                                               0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH                                                               0x0838
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V                                                               31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT                                                              0x083c
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V                                                              31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC                                                              0x0840
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V                                                              31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT                                                               0x0844
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT_V                                                               31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC                                                              0x0848
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V                                                              31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT                                                               0x084c
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT_V                                                               31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC                                                             0x0850
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V                                                             31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT                                                              0x0854
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT_V                                                              31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC                                                             0x0858
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V                                                             31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT                                                              0x085c
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V                                                              31:0
+
+#define NV902D_PIXELS_FROM_CPU_DATA                                                                        0x0860
+#define NV902D_PIXELS_FROM_CPU_DATA_V                                                                        31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP                                                         0x0888
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V                                                          0:0
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE                                             0x00000000
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE                                              0x00000001
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0                                                               0x08b0
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0_V                                                               31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0                                                               0x08b4
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0_V                                                               31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH                                                            0x08b8
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V                                                            31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT                                                           0x08bc
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V                                                           31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC                                                           0x08c0
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V                                                           31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT                                                            0x08c4
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V                                                            31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC                                                           0x08c8
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V                                                           31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT                                                            0x08cc
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V                                                            31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC                                                          0x08d0
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V                                                          31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT                                                           0x08d4
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V                                                           31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC                                                          0x08d8
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V                                                          31:0
+
+#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT                                                               0x08dc
+#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V                                                               31:0
+#endif /* _cl_fermi_twod_a_h_ */
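
The cl902d header above (FERMI_TWOD_A, per its include guard) covers destination/source surface setup, the raster OPERATION selector, solid-primitive rendering and the PIXELS_FROM_CPU/PIXELS_FROM_MEMORY transfer paths. As one hedged sketch of how those methods combine, the snippet below fills a rectangle with a solid colour; ex_push_method() is a hypothetical stand-in for queuing a single method/data pair and is not a nouveau helper:

	/* Hypothetical stub: a real driver would append to its push buffer. */
	static void ex_push_method(u32 mthd, u32 data)
	{
		(void)mthd; (void)data;
	}

	static void ex_2d_fill_rect(u32 argb, u32 x0, u32 y0, u32 x1, u32 y1)
	{
		ex_push_method(NV902D_SET_OPERATION,
			       NV902D_SET_OPERATION_V_SRCCOPY);
		ex_push_method(NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT,
			       NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8);
		ex_push_method(NV902D_RENDER_SOLID_PRIM_MODE,
			       NV902D_RENDER_SOLID_PRIM_MODE_V_RECTS);
		ex_push_method(NV902D_SET_RENDER_SOLID_PRIM_COLOR, argb);
		/* In RECTS mode a pair of vertices bounds one rectangle. */
		ex_push_method(NV902D_RENDER_SOLID_PRIM_POINT_SET_X(0), x0);
		ex_push_method(NV902D_RENDER_SOLID_PRIM_POINT_Y(0), y0);
		ex_push_method(NV902D_RENDER_SOLID_PRIM_POINT_SET_X(1), x1);
		ex_push_method(NV902D_RENDER_SOLID_PRIM_POINT_Y(1), y1);
	}
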
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h
new file mode 100644 (file)
index 0000000..b8282a6
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2003-2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_fermi_memory_to_memory_format_a_h_
+#define _cl_fermi_memory_to_memory_format_a_h_
+
+#define NV9039_SET_OBJECT                                                                                  0x0000
+#define NV9039_SET_OBJECT_CLASS_ID                                                                           15:0
+#define NV9039_SET_OBJECT_ENGINE_ID                                                                         20:16
+
+#define NV9039_OFFSET_OUT_UPPER                                                                            0x0238
+#define NV9039_OFFSET_OUT_UPPER_VALUE                                                                         7:0
+
+#define NV9039_OFFSET_OUT                                                                                  0x023c
+#define NV9039_OFFSET_OUT_VALUE                                                                              31:0
+
+#define NV9039_LAUNCH_DMA                                                                                  0x0300
+#define NV9039_LAUNCH_DMA_SRC_INLINE                                                                          0:0
+#define NV9039_LAUNCH_DMA_SRC_INLINE_FALSE                                                             0x00000000
+#define NV9039_LAUNCH_DMA_SRC_INLINE_TRUE                                                              0x00000001
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT                                                                   4:4
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR                                                0x00000000
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH                                                      0x00000001
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT                                                                   8:8
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR                                                0x00000000
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH                                                      0x00000001
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE                                                                   13:12
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE                                                0x00000000
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY                                                   0x00000001
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE                                            0x00000002
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE                                                                    17:16
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE_NONE                                                          0x00000000
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT                                                     0x00000001
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE                                                             20:20
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS                                             0x00000000
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD                                               0x00000001
+
+#define NV9039_OFFSET_IN_UPPER                                                                             0x030c
+#define NV9039_OFFSET_IN_UPPER_VALUE                                                                          7:0
+
+#define NV9039_OFFSET_IN                                                                                   0x0310
+#define NV9039_OFFSET_IN_VALUE                                                                               31:0
+
+#define NV9039_PITCH_IN                                                                                    0x0314
+#define NV9039_PITCH_IN_VALUE                                                                                31:0
+
+#define NV9039_PITCH_OUT                                                                                   0x0318
+#define NV9039_PITCH_OUT_VALUE                                                                               31:0
+
+#define NV9039_LINE_LENGTH_IN                                                                              0x031c
+#define NV9039_LINE_LENGTH_IN_VALUE                                                                          31:0
+
+#define NV9039_LINE_COUNT                                                                                  0x0320
+#define NV9039_LINE_COUNT_VALUE                                                                              31:0
+#endif /* _cl_fermi_memory_to_memory_format_a_h_ */
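
cl9039 (FERMI_MEMORY_TO_MEMORY_FORMAT_A) describes a DMA copy: source and destination offsets and pitches, a line length and line count, and a LAUNCH_DMA word whose bit-fields pick pitch versus blocklinear layout and the completion behaviour. A hedged sketch of the method stream a pitch-linear copy might queue, reusing the hypothetical ex_push_method() stub from the cl902d sketch and the illustrative EX_FIELD_VAL() macro from the cl887d one:

	/* Illustrative only: copy `lines` rows of `line_len` bytes. */
	static void ex_m2mf_copy_linear(u64 src, u64 dst, u32 pitch,
					u32 line_len, u32 lines)
	{
		ex_push_method(NV9039_OFFSET_IN_UPPER, upper_32_bits(src));
		ex_push_method(NV9039_OFFSET_IN, lower_32_bits(src));
		ex_push_method(NV9039_OFFSET_OUT_UPPER, upper_32_bits(dst));
		ex_push_method(NV9039_OFFSET_OUT, lower_32_bits(dst));
		ex_push_method(NV9039_PITCH_IN, pitch);
		ex_push_method(NV9039_PITCH_OUT, pitch);
		ex_push_method(NV9039_LINE_LENGTH_IN, line_len);
		ex_push_method(NV9039_LINE_COUNT, lines);
		ex_push_method(NV9039_LAUNCH_DMA,
			       EX_FIELD_VAL(NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT,
					    NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH) |
			       EX_FIELD_VAL(NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT,
					    NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH) |
			       EX_FIELD_VAL(NV9039_LAUNCH_DMA_COMPLETION_TYPE,
					    NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY));
	}

upper_32_bits()/lower_32_bits() are the kernel's usual helpers; only bits 7:0 of the upper halves are meaningful here, per the *_OFFSET_*_UPPER fields above.
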
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h
new file mode 100644 (file)
index 0000000..673d668
--- /dev/null
@@ -0,0 +1,74 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl906f_h_
+#define _cl906f_h_
+
+/* fields and values */
+#define NV906F_SEMAPHOREA                                          (0x00000010)
+#define NV906F_SEMAPHOREA_OFFSET_UPPER                                     7:0
+#define NV906F_SEMAPHOREB                                          (0x00000014)
+#define NV906F_SEMAPHOREB_OFFSET_LOWER                                    31:2
+#define NV906F_SEMAPHOREC                                          (0x00000018)
+#define NV906F_SEMAPHOREC_PAYLOAD                                         31:0
+#define NV906F_SEMAPHORED                                          (0x0000001C)
+#define NV906F_SEMAPHORED_OPERATION                                        3:0
+#define NV906F_SEMAPHORED_OPERATION_ACQUIRE                         0x00000001
+#define NV906F_SEMAPHORED_OPERATION_RELEASE                         0x00000002
+#define NV906F_SEMAPHORED_OPERATION_ACQ_GEQ                         0x00000004
+#define NV906F_SEMAPHORED_OPERATION_ACQ_AND                         0x00000008
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH                                 12:12
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED                   0x00000000
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED                    0x00000001
+#define NV906F_SEMAPHORED_RELEASE_WFI                                    20:20
+#define NV906F_SEMAPHORED_RELEASE_WFI_EN                            0x00000000
+#define NV906F_SEMAPHORED_RELEASE_WFI_DIS                           0x00000001
+#define NV906F_SEMAPHORED_RELEASE_SIZE                                   24:24
+#define NV906F_SEMAPHORED_RELEASE_SIZE_16BYTE                       0x00000000
+#define NV906F_SEMAPHORED_RELEASE_SIZE_4BYTE                        0x00000001
+#define NV906F_NON_STALL_INTERRUPT                                 (0x00000020)
+#define NV906F_NON_STALL_INTERRUPT_HANDLE                                 31:0
+#define NV906F_SET_REFERENCE                                       (0x00000050)
+#define NV906F_SET_REFERENCE_COUNT                                        31:0
+
+/* dma method formats */
+#define NV906F_DMA_METHOD_ADDRESS                                  11:0
+#define NV906F_DMA_SUBDEVICE_MASK                                  15:4
+#define NV906F_DMA_METHOD_SUBCHANNEL                               15:13
+#define NV906F_DMA_TERT_OP                                         17:16
+#define NV906F_DMA_TERT_OP_GRP0_INC_METHOD                         (0x00000000)
+#define NV906F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK                   (0x00000001)
+#define NV906F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK                 (0x00000002)
+#define NV906F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK                   (0x00000003)
+#define NV906F_DMA_TERT_OP_GRP2_NON_INC_METHOD                     (0x00000000)
+#define NV906F_DMA_METHOD_COUNT                                    28:16
+#define NV906F_DMA_IMMD_DATA                                       28:16
+#define NV906F_DMA_SEC_OP                                          31:29
+#define NV906F_DMA_SEC_OP_GRP0_USE_TERT                            (0x00000000)
+#define NV906F_DMA_SEC_OP_INC_METHOD                               (0x00000001)
+#define NV906F_DMA_SEC_OP_GRP2_USE_TERT                            (0x00000002)
+#define NV906F_DMA_SEC_OP_NON_INC_METHOD                           (0x00000003)
+#define NV906F_DMA_SEC_OP_IMMD_DATA_METHOD                         (0x00000004)
+#define NV906F_DMA_SEC_OP_ONE_INC                                  (0x00000005)
+#define NV906F_DMA_SEC_OP_RESERVED6                                (0x00000006)
+#define NV906F_DMA_SEC_OP_END_PB_SEGMENT                           (0x00000007)
+#endif /* _cl906f_h_ */
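
Besides the semaphore and reference methods, cl906f spells out the Fermi push-buffer command-word layout: SEC_OP in bits 31:29 selects the encoding, METHOD_COUNT (or IMMD_DATA) sits in 28:16, the subchannel in 15:13 and the method address in 11:0. A hedged sketch of assembling an incrementing-method header from those fields with the illustrative EX_FIELD_VAL() macro, assuming (as is conventional for this format) that the address field carries the method offset in 32-bit units:

	static inline u32 ex_incr_header(u32 subc, u32 mthd, u32 count)
	{
		/* mthd >> 2: the address field is assumed to be in dwords. */
		return EX_FIELD_VAL(NV906F_DMA_SEC_OP,
				    NV906F_DMA_SEC_OP_INC_METHOD) |
		       EX_FIELD_VAL(NV906F_DMA_METHOD_COUNT, count) |
		       EX_FIELD_VAL(NV906F_DMA_METHOD_SUBCHANNEL, subc) |
		       EX_FIELD_VAL(NV906F_DMA_METHOD_ADDRESS, mthd >> 2);
	}

For example, ex_incr_header(0, NV902D_SET_DST_FORMAT, 2) would start an incrementing run that writes SET_DST_FORMAT (0x0200) and then SET_DST_MEMORY_LAYOUT (0x0204).
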
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h
new file mode 100644 (file)
index 0000000..77366a2
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907c_h_
+#define _cl907c_h_
+
+// class methods
+#define NV907C_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE                                   9:8
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING                       (0x00000000)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE                         (0x00000001)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE                           (0x00000002)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_AT_FRAME                          (0x00000003)
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE                               2:2
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE                       (0x00000000)
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE                        (0x00000001)
+#define NV907C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_LINE                                   30:16
+#define NV907C_SET_PRESENT_CONTROL_ON_LINE_MARGIN                               15:10
+#define NV907C_SET_CONTEXT_DMAS_ISO(b)                                          (0x000000C0 + (b)*0x00000004)
+#define NV907C_SET_CONTEXT_DMAS_ISO_HANDLE                                      31:0
+#define NV907C_SET_BASE_LUT_LO                                                  (0x000000E0)
+#define NV907C_SET_BASE_LUT_LO_ENABLE                                           31:30
+#define NV907C_SET_BASE_LUT_LO_ENABLE_DISABLE                                   (0x00000000)
+#define NV907C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT                              (0x00000001)
+#define NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE                                    (0x00000002)
+#define NV907C_SET_BASE_LUT_LO_MODE                                             27:24
+#define NV907C_SET_BASE_LUT_LO_MODE_LORES                                       (0x00000000)
+#define NV907C_SET_BASE_LUT_LO_MODE_HIRES                                       (0x00000001)
+#define NV907C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE                      (0x00000003)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE                (0x00000004)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE               (0x00000005)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE                (0x00000006)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE                 (0x00000007)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE                (0x00000008)
+#define NV907C_SET_BASE_LUT_HI                                                  (0x000000E4)
+#define NV907C_SET_BASE_LUT_HI_ORIGIN                                           31:0
+#define NV907C_SET_OUTPUT_LUT_LO                                                (0x000000E8)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE                                         31:30
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE                                 (0x00000000)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT                            (0x00000001)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE                                  (0x00000002)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE                                           27:24
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_LORES                                     (0x00000000)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_HIRES                                     (0x00000001)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE                    (0x00000003)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE              (0x00000004)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE             (0x00000005)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE              (0x00000006)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE               (0x00000007)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE              (0x00000008)
+#define NV907C_SET_CONTEXT_DMA_LUT                                              (0x000000FC)
+#define NV907C_SET_CONTEXT_DMA_LUT_HANDLE                                       31:0
+#define NV907C_SET_CSC_RED2RED                                                  (0x00000140)
+#define NV907C_SET_CSC_RED2RED_OWNER                                            31:31
+#define NV907C_SET_CSC_RED2RED_OWNER_CORE                                       (0x00000000)
+#define NV907C_SET_CSC_RED2RED_OWNER_BASE                                       (0x00000001)
+#define NV907C_SET_CSC_RED2RED_COEFF                                            18:0
+#define NV907C_SET_CSC_GRN2RED                                                  (0x00000144)
+#define NV907C_SET_CSC_GRN2RED_COEFF                                            18:0
+#define NV907C_SET_CSC_BLU2RED                                                  (0x00000148)
+#define NV907C_SET_CSC_BLU2RED_COEFF                                            18:0
+#define NV907C_SET_CSC_CONSTANT2RED                                             (0x0000014C)
+#define NV907C_SET_CSC_CONSTANT2RED_COEFF                                       18:0
+#define NV907C_SET_CSC_RED2GRN                                                  (0x00000150)
+#define NV907C_SET_CSC_RED2GRN_COEFF                                            18:0
+#define NV907C_SET_CSC_GRN2GRN                                                  (0x00000154)
+#define NV907C_SET_CSC_GRN2GRN_COEFF                                            18:0
+#define NV907C_SET_CSC_BLU2GRN                                                  (0x00000158)
+#define NV907C_SET_CSC_BLU2GRN_COEFF                                            18:0
+#define NV907C_SET_CSC_CONSTANT2GRN                                             (0x0000015C)
+#define NV907C_SET_CSC_CONSTANT2GRN_COEFF                                       18:0
+#define NV907C_SET_CSC_RED2BLU                                                  (0x00000160)
+#define NV907C_SET_CSC_RED2BLU_COEFF                                            18:0
+#define NV907C_SET_CSC_GRN2BLU                                                  (0x00000164)
+#define NV907C_SET_CSC_GRN2BLU_COEFF                                            18:0
+#define NV907C_SET_CSC_BLU2BLU                                                  (0x00000168)
+#define NV907C_SET_CSC_BLU2BLU_COEFF                                            18:0
+#define NV907C_SET_CSC_CONSTANT2BLU                                             (0x0000016C)
+#define NV907C_SET_CSC_CONSTANT2BLU_COEFF                                       18:0
+
+#define NV907C_SURFACE_SET_OFFSET(a,b)                                          (0x00000400 + (a)*0x00000020 + (b)*0x00000004)
+#define NV907C_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV907C_SURFACE_SET_SIZE(a)                                              (0x00000408 + (a)*0x00000020)
+#define NV907C_SURFACE_SET_SIZE_WIDTH                                           15:0
+#define NV907C_SURFACE_SET_SIZE_HEIGHT                                          31:16
+#define NV907C_SURFACE_SET_STORAGE(a)                                           (0x0000040C + (a)*0x00000020)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV907C_SURFACE_SET_STORAGE_PITCH                                        20:8
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                24:24
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS(a)                                            (0x00000410 + (a)*0x00000020)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_I8                                     (0x0000001E)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID16                                 (0x0000001F)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID32                                 (0x0000002E)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                    (0x000000CA)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                            (0x000000D1)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS                  (0x00000022)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8                               (0x000000D5)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R5G6B5                                 (0x000000E8)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16                        (0x000000C6)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS                 (0x00000023)
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE                                  1:0
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA                            (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA                            (0x00000002)
+#define NV907C_SURFACE_SET_PARAMS_GAMMA                                         2:2
+#define NV907C_SURFACE_SET_PARAMS_GAMMA_LINEAR                                  (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_GAMMA_SRGB                                    (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT                                        5:4
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FRM                                    (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD1                                   (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD2                                   (0x00000002)
+#endif // _cl907c_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
new file mode 100644
index 0000000..34bc3ea
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907d_h_
+#define _cl907d_h_
+
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20                             0x00000014
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18               0:0
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE         0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE          0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24               1:1
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE         0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE          0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18                 2:2
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE           0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE            0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24                 3:3
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE           0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE            0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0                          7:4
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A               8:8
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE         0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE          0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B               9:9
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE         0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE          0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1                          10:10
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS                   11:11
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE             0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE              0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2                          12:12
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3                          15:14
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4                          19:17
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5                          23:20
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A                        24:24
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE                  0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE                   0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B                        25:25
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE                  0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE                   0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE                26:26
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE          0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE           0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6                          31:27
+
+
+// class methods
+#define NV907D_DAC_SET_CONTROL(a)                                               (0x00000180 + (a)*0x00000020)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK                                       3:0
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_NONE                                  (0x00000000)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD0                                 (0x00000001)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD1                                 (0x00000002)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD2                                 (0x00000004)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD3                                 (0x00000008)
+#define NV907D_DAC_SET_CONTROL_PROTOCOL                                         12:8
+#define NV907D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT                                 (0x00000000)
+#define NV907D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT                                 (0x00000013)
+
+#define NV907D_SOR_SET_CONTROL(a)                                               (0x00000200 + (a)*0x00000020)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK                                       3:0
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_NONE                                  (0x00000000)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD0                                 (0x00000001)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD1                                 (0x00000002)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD2                                 (0x00000004)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD3                                 (0x00000008)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DP_A                                    (0x00000008)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DP_B                                    (0x00000009)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 14:14
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE                             21:20
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF                         (0x00000000)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2                          (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4                          (0x00000002)
+
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a)                              (0x00000404 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE                        1:0
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER          (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER        (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER      (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY                  3:3
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY                  4:4
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH                     9:6
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT             (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422          (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444          (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422          (0x00000003)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422          (0x00000004)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444          (0x00000005)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444          (0x00000006)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422          (0x00000007)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444          (0x00000008)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444          (0x00000009)
+#define NV907D_HEAD_SET_CONTROL(a)                                              (0x00000408 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE                                       0:0
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE                           (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED                            (0x00000001)
+#define NV907D_HEAD_SET_OVERSCAN_COLOR(a)                                       (0x00000410 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_RED                                      9:0
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_GRN                                      19:10
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_BLU                                      29:20
+#define NV907D_HEAD_SET_RASTER_SIZE(a)                                          (0x00000414 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_SIZE_WIDTH                                       14:0
+#define NV907D_HEAD_SET_RASTER_SIZE_HEIGHT                                      30:16
+#define NV907D_HEAD_SET_RASTER_SYNC_END(a)                                      (0x00000418 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_SYNC_END_X                                       14:0
+#define NV907D_HEAD_SET_RASTER_SYNC_END_Y                                       30:16
+#define NV907D_HEAD_SET_RASTER_BLANK_END(a)                                     (0x0000041C + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_BLANK_END_X                                      14:0
+#define NV907D_HEAD_SET_RASTER_BLANK_END_Y                                      30:16
+#define NV907D_HEAD_SET_RASTER_BLANK_START(a)                                   (0x00000420 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_BLANK_START_X                                    14:0
+#define NV907D_HEAD_SET_RASTER_BLANK_START_Y                                    30:16
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2(a)                                   (0x00000424 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2_YSTART                               14:0
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2_YEND                                 30:16
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR(a)                                   (0x0000042C + (a)*0x00000300)
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_RED                                  9:0
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN                                19:10
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE                                 29:20
+#define NV907D_HEAD_SET_CRC_CONTROL(a)                                          (0x00000430 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL                         1:0
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE                    (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE                    (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY                 (0x00000002)
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE                      2:2
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE                (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE                 (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE                              3:3
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE                        (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE                         (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT                              19:8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i)                       (0x00000FF0 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1                  4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0                         (0x00000FF0)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1                         (0x00000FF1)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2                         (0x00000FF2)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3                         (0x00000FF3)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i)                        (0x00000FF8 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1                   4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0                          (0x00000FF8)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1                          (0x00000FF9)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2                          (0x00000FFA)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3                          (0x00000FFB)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i)                       (0x00000F0F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1                  8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0                         (0x00000F0F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1                         (0x00000F1F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2                         (0x00000F2F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3                         (0x00000F3F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4                         (0x00000F4F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5                         (0x00000F5F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6                         (0x00000F6F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7                         (0x00000F7F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i)                        (0x00000F8F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1                   4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0                          (0x00000F8F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1                          (0x00000F9F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2                          (0x00000FAF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3                          (0x00000FBF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i)                      (0x000000FF +(i)*256)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1                 8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0                        (0x000000FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1                        (0x000001FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2                        (0x000002FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3                        (0x000003FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4                        (0x000004FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5                        (0x000005FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6                        (0x000006FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7                        (0x000007FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE                         (0x00000FFF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT                            31:20
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i)                     (0x00000FF0 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1                4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0                       (0x00000FF0)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1                       (0x00000FF1)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2                       (0x00000FF2)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3                       (0x00000FF3)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i)                      (0x00000FF8 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1                 4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0                        (0x00000FF8)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1                        (0x00000FF9)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2                        (0x00000FFA)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3                        (0x00000FFB)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i)                     (0x00000F0F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1                8
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0                       (0x00000F0F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1                       (0x00000F1F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2                       (0x00000F2F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3                       (0x00000F3F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4                       (0x00000F4F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5                       (0x00000F5F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6                       (0x00000F6F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7                       (0x00000F7F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i)                      (0x00000F8F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1                 4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0                        (0x00000F8F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1                        (0x00000F9F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2                        (0x00000FAF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3                        (0x00000FBF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i)                    (0x000000FF +(i)*256)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1               8
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0                      (0x000000FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1                      (0x000001FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2                      (0x000002FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3                      (0x000003FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4                      (0x000004FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5                      (0x000005FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6                      (0x000006FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7                      (0x000007FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE                       (0x00000FFF)
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE                           5:5
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE                   (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE                    (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CRC(a)                                      (0x00000438 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE                                  31:0
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO(a)                                        (0x00000448 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE                                    31:31
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE                            (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE                             (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE                                      27:24
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES                                (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES                                (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE               (0x00000003)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE         (0x00000004)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE        (0x00000005)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE         (0x00000006)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE          (0x00000007)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE         (0x00000008)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE                       20:20
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE               (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE                (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_HI(a)                                        (0x0000044C + (a)*0x00000300)
+#define NV907D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN                                    31:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a)                                (0x00000450 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ                             30:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001                    31:31
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE              (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE               (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a)                            (0x00000454 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE                          21:20
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25                   (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28                   (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM               (0x00000002)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER                    24:24
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE              (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE               (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING                25:25
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE          (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE           (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE                  26:26
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK           (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK           (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a)                            (0x00000458 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ                         30:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001                31:31
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE          (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE           (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMA_LUT(a)                                      (0x0000045C + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE                                  31:0
+#define NV907D_HEAD_SET_OFFSET(a)                                               (0x00000460 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OFFSET_ORIGIN                                           31:0
+#define NV907D_HEAD_SET_SIZE(a)                                                 (0x00000468 + (a)*0x00000300)
+#define NV907D_HEAD_SET_SIZE_WIDTH                                              15:0
+#define NV907D_HEAD_SET_SIZE_HEIGHT                                             31:16
+#define NV907D_HEAD_SET_STORAGE(a)                                              (0x0000046C + (a)*0x00000300)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT                                    3:0
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                            (0x00000000)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                           (0x00000001)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                          (0x00000002)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                         (0x00000003)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                       (0x00000004)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                     (0x00000005)
+#define NV907D_HEAD_SET_STORAGE_PITCH                                           20:8
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT                                   24:24
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                       (0x00000000)
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH                             (0x00000001)
+#define NV907D_HEAD_SET_PARAMS(a)                                               (0x00000470 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PARAMS_FORMAT                                           15:8
+#define NV907D_HEAD_SET_PARAMS_FORMAT_I8                                        (0x0000001E)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_VOID16                                    (0x0000001F)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_VOID32                                    (0x0000002E)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                       (0x000000CA)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8                                  (0x000000CF)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10                               (0x000000D1)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS                     (0x00000022)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8                                  (0x000000D5)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R5G6B5                                    (0x000000E8)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5                                  (0x000000E9)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16                           (0x000000C6)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS                    (0x00000023)
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE                                     1:0
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA                               (0x00000000)
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA                               (0x00000002)
+#define NV907D_HEAD_SET_PARAMS_GAMMA                                            2:2
+#define NV907D_HEAD_SET_PARAMS_GAMMA_LINEAR                                     (0x00000000)
+#define NV907D_HEAD_SET_PARAMS_GAMMA_SRGB                                       (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMAS_ISO(a)                                     (0x00000474 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE                                 31:0
+#define NV907D_HEAD_SET_CONTROL_CURSOR(a)                                       (0x00000480 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE                                   31:31
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE                           (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE                            (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT                                   25:24
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5                          (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8                          (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE                                     26:26
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32                             (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64                             (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X                               13:8
+#define NV907D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y                               21:16
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION                              29:28
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND                  (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND          (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR                          (0x00000002)
+#define NV907D_HEAD_SET_OFFSET_CURSOR(a)                                        (0x00000484 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OFFSET_CURSOR_ORIGIN                                    31:0
+#define NV907D_HEAD_SET_CONTEXT_DMA_CURSOR(a)                                   (0x0000048C + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE                               31:0
+#define NV907D_HEAD_SET_DITHER_CONTROL(a)                                       (0x00000490 + (a)*0x00000300)
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE                                   0:0
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE                           (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE                            (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS                                     2:1
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS                    (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS                    (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS                   (0x00000002)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE                                     6:3
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC                     (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC                      (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2                         (0x00000002)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2                          (0x00000003)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL                            (0x00000004)
+#define NV907D_HEAD_SET_DITHER_CONTROL_PHASE                                    8:7
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER(a)                                (0x00000494 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS                     2:0
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1              (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2              (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3              (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE     (0x00000003)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5              (0x00000004)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS                   4:3
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1            (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2            (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8            (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS                    23:16
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS                    31:24
+#define NV907D_HEAD_SET_PROCAMP(a)                                              (0x00000498 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE                                     1:0
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB                                 (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601                             (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709                             (0x00000002)
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF                                      2:2
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO                                 (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF_ON                                   (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_SAT_COS                                         19:8
+#define NV907D_HEAD_SET_PROCAMP_SAT_SINE                                        31:20
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE                                   5:5
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA                              (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA                               (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION                               6:6
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE                       (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE                        (0x00000001)
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN(a)                                    (0x000004B0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN_X                                     14:0
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN_Y                                     30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN(a)                                     (0x000004B8 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH                                  14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT                                 30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT(a)                                    (0x000004C0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH                                 14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT                                30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a)                                (0x000004C4 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH                             14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT                            30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a)                                (0x000004C8 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH                             14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT                            30:16
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a)                            (0x000004D0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE                        0:0
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE                  (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE                   (0x00000001)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH                   11:8
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8             (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16            (0x00000001)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32            (0x00000003)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64            (0x00000005)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE                  13:12
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA            (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA            (0x00000002)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a)                                 (0x000004D4 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE                             0:0
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE                       (0x00000000)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE                        (0x00000001)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH                        11:8
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16                 (0x00000001)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32                 (0x00000003)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64                 (0x00000005)
+#endif // _cl907d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h
new file mode 100644
index 0000000..64ef0c5
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907e_h_
+#define _cl907e_h_
+
+// class methods
+#define NV907E_SET_PRESENT_CONTROL                                              (0x00000084)
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE                                   1:0
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP                              (0x00000000)
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP                         (0x00000003)
+#define NV907E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         7:4
+#define NV907E_SET_CONTEXT_DMA_ISO                                              (0x000000C0)
+#define NV907E_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NV907E_SET_COMPOSITION_CONTROL                                          (0x00000100)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE                                     3:0
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING           (0x00000000)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING      (0x00000001)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_OPAQUE                              (0x00000002)
+
+#define NV907E_SURFACE_SET_OFFSET                                               (0x00000400)
+#define NV907E_SURFACE_SET_OFFSET_ORIGIN                                        31:0
+#define NV907E_SURFACE_SET_SIZE                                                 (0x00000408)
+#define NV907E_SURFACE_SET_SIZE_WIDTH                                           15:0
+#define NV907E_SURFACE_SET_SIZE_HEIGHT                                          31:16
+#define NV907E_SURFACE_SET_STORAGE                                              (0x0000040C)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT                                 3:0
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB                         (0x00000000)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS                        (0x00000001)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS                       (0x00000002)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS                      (0x00000003)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS                    (0x00000004)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS                  (0x00000005)
+#define NV907E_SURFACE_SET_STORAGE_PITCH                                        20:8
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT                                24:24
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                    (0x00000000)
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH                          (0x00000001)
+#define NV907E_SURFACE_SET_PARAMS                                               (0x00000410)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT                                        15:8
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8                           (0x00000028)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8                           (0x00000029)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                            (0x000000D1)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS                  (0x00000022)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                               (0x000000CF)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                               (0x000000E9)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                    (0x000000CA)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16                        (0x000000C6)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS                 (0x00000023)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE                                   1:0
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB                               (0x00000000)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601                           (0x00000001)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709                           (0x00000002)
+#endif // _cl907e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
new file mode 100644
index 0000000..2a2612d
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl917d_h_
+#define _cl917d_h_
+
+// class methods
+#define NV917D_SOR_SET_CONTROL(a)                                               (0x00000200 + (a)*0x00000020)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK                                       3:0
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_NONE                                  (0x00000000)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD0                                 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD1                                 (0x00000002)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD2                                 (0x00000004)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD3                                 (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A                                    (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B                                    (0x00000009)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 14:14
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE                             21:20
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF                         (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2                          (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4                          (0x00000002)
+
+#define NV917D_HEAD_SET_CONTROL_CURSOR(a)                                       (0x00000480 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE                                   31:31
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE                           (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE                            (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT                                   25:24
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5                          (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8                          (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE                                     27:26
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32                             (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64                             (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128                           (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256                           (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X                               15:8
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y                               23:16
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION                              29:28
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND                  (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND          (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR                          (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL(a)                                       (0x000004A0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE                                   0:0
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE                           (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE                            (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS                                     2:1
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS                    (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS                    (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS                   (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE                                     6:3
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC                     (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC                      (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2                         (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2                          (0x00000003)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL                            (0x00000004)
+#define NV917D_HEAD_SET_DITHER_CONTROL_PHASE                                    8:7
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a)                            (0x000004D0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE                        0:0
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE                  (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE                   (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH                   11:8
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8             (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16            (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32            (0x00000003)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64            (0x00000005)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE                  13:12
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA            (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA            (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT                      17:16
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE           (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257            (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025           (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT                    21:20
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE         (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257          (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025         (0x00000002)
+#endif // _cl917d_h
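
Throughout these nvhw class headers, every field is published as a bare MSB:LSB bit-range define within a 32-bit method word, and per-head methods are parameterised by an index times a fixed stride (e.g. (a)*0x00000300 above). A minimal stand-alone sketch of how such defines can be consumed via the classic ternary trick; the FLD_* helpers and the local HOT_SPOT_Y alias are illustrative assumptions, not part of this patch:

#include <stdint.h>
#include <stdio.h>

#define FLD_LO(f)     (0 ? f)   /* low bit of an "MSB:LSB" field define  */
#define FLD_HI(f)     (1 ? f)   /* high bit of an "MSB:LSB" field define */
#define FLD_MASK(f)   ((uint32_t)((1ULL << (FLD_HI(f) - FLD_LO(f) + 1)) - 1) << FLD_LO(f))
#define FLD_SET(f, v) (((uint32_t)(v) << FLD_LO(f)) & FLD_MASK(f))
#define FLD_GET(w, f) (((w) & FLD_MASK(f)) >> FLD_LO(f))

/* Example field, copied from NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y above. */
#define HOT_SPOT_Y    23:16

int main(void)
{
	uint32_t word = FLD_SET(HOT_SPOT_Y, 0x2a);

	printf("word=0x%08x hot_spot_y=%u\n", (unsigned)word,
	       (unsigned)FLD_GET(word, HOT_SPOT_Y));
	return 0;
}

The trick works because (0 ? 23:16) parses as a conditional expression yielding 16 while (1 ? 23:16) yields 23, so the same MSB:LSB token sequence provides both field bounds.
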
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h
new file mode 100644
index 0000000..fe5d10f
--- /dev/null
@@ -0,0 +1,162 @@
+/*******************************************************************************
+    Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cla0b5_h_
+#define _cla0b5_h_
+
+#define NVA0B5_SET_SRC_PHYS_MODE                                                (0x00000260)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET                                         1:0
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB                                (0x00000000)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM                         (0x00000001)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM                      (0x00000002)
+#define NVA0B5_SET_DST_PHYS_MODE                                                (0x00000264)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET                                         1:0
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB                                (0x00000000)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM                         (0x00000001)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM                      (0x00000002)
+#define NVA0B5_LAUNCH_DMA                                                       (0x00000300)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE                                    1:0
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE                               (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED                          (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED                      (0x00000002)
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE                                          2:2
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE                                    (0x00000000)
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE                                     (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE                                        4:3
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE                                   (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE             (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE            (0x00000002)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE                                        6:5
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE                                   (0x00000000)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING                               (0x00000001)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING                           (0x00000002)
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT                                     7:7
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR                         (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH                               (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT                                     8:8
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR                         (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH                               (0x00000001)
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE                                     9:9
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE                               (0x00000000)
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE                                (0x00000001)
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE                                          10:10
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE                                    (0x00000000)
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE                                     (0x00000001)
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2                                             11:11
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING                             (0x00000000)
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE                              (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE                                              12:12
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL                                      (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL                                     (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DST_TYPE                                              13:13
+#define NVA0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL                                      (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL                                     (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION                                   17:14
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN                              (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX                              (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR                              (0x00000002)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND                              (0x00000003)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR                               (0x00000004)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD                              (0x00000005)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC                               (0x00000006)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC                               (0x00000007)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD                              (0x0000000A)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN                              (0x0000000B)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX                              (0x0000000C)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMUL                              (0x0000000D)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMUL                              (0x0000000E)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN                              18:18
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED                       (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED                     (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE                            19:19
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE                      (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE                       (0x00000001)
+#define NVA0B5_OFFSET_IN_UPPER                                                  (0x00000400)
+#define NVA0B5_OFFSET_IN_UPPER_UPPER                                            7:0
+#define NVA0B5_OFFSET_IN_LOWER                                                  (0x00000404)
+#define NVA0B5_OFFSET_IN_LOWER_VALUE                                            31:0
+#define NVA0B5_OFFSET_OUT_UPPER                                                 (0x00000408)
+#define NVA0B5_OFFSET_OUT_UPPER_UPPER                                           7:0
+#define NVA0B5_OFFSET_OUT_LOWER                                                 (0x0000040C)
+#define NVA0B5_OFFSET_OUT_LOWER_VALUE                                           31:0
+#define NVA0B5_PITCH_IN                                                         (0x00000410)
+#define NVA0B5_PITCH_IN_VALUE                                                   31:0
+#define NVA0B5_PITCH_OUT                                                        (0x00000414)
+#define NVA0B5_PITCH_OUT_VALUE                                                  31:0
+#define NVA0B5_LINE_LENGTH_IN                                                   (0x00000418)
+#define NVA0B5_LINE_LENGTH_IN_VALUE                                             31:0
+#define NVA0B5_LINE_COUNT                                                       (0x0000041C)
+#define NVA0B5_LINE_COUNT_VALUE                                                 31:0
+#define NVA0B5_SET_REMAP_CONST_A                                                (0x00000700)
+#define NVA0B5_SET_REMAP_CONST_A_V                                              31:0
+#define NVA0B5_SET_REMAP_CONST_B                                                (0x00000704)
+#define NVA0B5_SET_REMAP_CONST_B_V                                              31:0
+#define NVA0B5_SET_REMAP_COMPONENTS                                             (0x00000708)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X                                       2:0
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X                                 (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y                                 (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z                                 (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W                                 (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A                               (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B                               (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE                              (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y                                       6:4
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X                                 (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y                                 (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z                                 (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W                                 (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A                               (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B                               (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE                              (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z                                       10:8
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X                                 (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y                                 (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z                                 (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W                                 (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A                               (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B                               (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE                              (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W                                       14:12
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X                                 (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y                                 (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z                                 (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W                                 (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A                               (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B                               (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE                              (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE                              17:16
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE                          (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO                          (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE                        (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR                         (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS                          21:20
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE                      (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO                      (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE                    (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR                     (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS                          25:24
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE                      (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO                      (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE                    (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR                     (0x00000003)
+#endif // _cla0b5_h
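
The NVA0B5 copy class above describes a transfer through source/destination offsets, pitches, LINE_LENGTH_IN/LINE_COUNT, and one LAUNCH_DMA control word assembled from the fields listed. A hedged sketch of composing that word for a pitch-linear, non-pipelined copy, using only the bit positions defined in this header (the helper name is illustrative):

#include <stdint.h>

/* Field positions taken from the defines above:
 *   DATA_TRANSFER_TYPE 1:0, FLUSH_ENABLE 2:2, SRC_MEMORY_LAYOUT 7:7,
 *   DST_MEMORY_LAYOUT 8:8, MULTI_LINE_ENABLE 9:9.
 */
static uint32_t nva0b5_launch_dma_pitch_copy(int multi_line)
{
	uint32_t v = 0;

	v |= 0x2u << 0;                    /* DATA_TRANSFER_TYPE_NON_PIPELINED */
	v |= 0x1u << 2;                    /* FLUSH_ENABLE_TRUE                */
	v |= 0x1u << 7;                    /* SRC_MEMORY_LAYOUT_PITCH          */
	v |= 0x1u << 8;                    /* DST_MEMORY_LAYOUT_PITCH          */
	v |= (multi_line ? 1u : 0u) << 9;  /* MULTI_LINE_ENABLE                */

	return v;
}

Semaphore, interrupt and remap fields are left at zero, which the defines above mark as their NONE/FALSE encodings.
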
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h
new file mode 100644
index 0000000..ded616f
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clc37a__h_
+#define _clc37a__h_
+
+#define NVC37A_UPDATE                                                           (0x00000200)
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT(b)                                 (0x00000208 + (b)*0x00000004)
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_X                                  15:0
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y                                  31:16
+#endif // _clc37a_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h
new file mode 100644
index 0000000..0f7323b
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37b_h_
+#define _clC37b_h_
+
+// dma opcode instructions
+#define NVC37B_DMA
+#define NVC37B_DMA_OPCODE                                                        31:29
+#define NVC37B_DMA_OPCODE_METHOD                                            0x00000000
+#define NVC37B_DMA_OPCODE_JUMP                                              0x00000001
+#define NVC37B_DMA_OPCODE_NONINC_METHOD                                     0x00000002
+#define NVC37B_DMA_OPCODE_SET_SUBDEVICE_MASK                                0x00000003
+#define NVC37B_DMA_METHOD_COUNT                                                  27:18
+#define NVC37B_DMA_METHOD_OFFSET                                                  13:2
+#define NVC37B_DMA_DATA                                                           31:0
+#define NVC37B_DMA_DATA_NOP                                                 0x00000000
+#define NVC37B_DMA_JUMP_OFFSET                                                    11:2
+#define NVC37B_DMA_SET_SUBDEVICE_MASK_VALUE                                       11:0
+
+// class methods
+#define NVC37B_UPDATE                                                           (0x00000200)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW                                     1:1
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE                             (0x00000000)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE                              (0x00000001)
+#define NVC37B_SET_POINT_OUT(b)                                                 (0x00000208 + (b)*0x00000004)
+#define NVC37B_SET_POINT_OUT_X                                                  15:0
+#define NVC37B_SET_POINT_OUT_Y                                                  31:16
+#endif // _clC37b_h
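
The NVC37B_DMA_* defines at the top of clc37b.h give the pushbuffer method-header layout: the opcode sits in bits 31:29, the method count in 27:18 and the method offset in 13:2. A small decoding sketch built only from those bit positions (the struct and function names are illustrative, not from this patch):

#include <stdint.h>

struct nvc37b_mthd_hdr {
	uint32_t opcode;  /* NVC37B_DMA_OPCODE,        bits 31:29 */
	uint32_t count;   /* NVC37B_DMA_METHOD_COUNT,  bits 27:18 */
	uint32_t offset;  /* NVC37B_DMA_METHOD_OFFSET, bits 13:2  */
};

static struct nvc37b_mthd_hdr nvc37b_decode_hdr(uint32_t word)
{
	struct nvc37b_mthd_hdr h;

	h.opcode = (word >> 29) & 0x7;    /* METHOD, JUMP, NONINC_METHOD or SET_SUBDEVICE_MASK */
	h.count  = (word >> 18) & 0x3ff;
	h.offset = (word >> 2)  & 0xfff;

	return h;
}
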
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h
new file mode 100644
index 0000000..2b8c314
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37d_h_
+#define _clC37d_h_
+
+#define NV_DISP_NOTIFIER                                                             0x00000000
+#define NV_DISP_NOTIFIER_SIZEOF                                                      0x00000010
+#define NV_DISP_NOTIFIER__0                                                          0x00000000
+#define NV_DISP_NOTIFIER__0_PRESENT_COUNT                                            7:0
+#define NV_DISP_NOTIFIER__0_FIELD                                                    8:8
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE                                                9:9
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING                                    0x00000000
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE                                      0x00000001
+#define NV_DISP_NOTIFIER__0_R1                                                       15:10
+#define NV_DISP_NOTIFIER__0_R2                                                       23:16
+#define NV_DISP_NOTIFIER__0_R3                                                       29:24
+#define NV_DISP_NOTIFIER__0_STATUS                                                   31:30
+#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN                                         0x00000000
+#define NV_DISP_NOTIFIER__0_STATUS_BEGUN                                             0x00000001
+#define NV_DISP_NOTIFIER__0_STATUS_FINISHED                                          0x00000002
+#define NV_DISP_NOTIFIER__1                                                          0x00000001
+#define NV_DISP_NOTIFIER__1_R4                                                       31:0
+#define NV_DISP_NOTIFIER__2                                                          0x00000002
+#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO                                             31:0
+#define NV_DISP_NOTIFIER__3                                                          0x00000003
+#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI                                             31:0
+
+
+// class methods
+#define NVC37D_UPDATE                                                           (0x00000200)
+#define NVC37D_UPDATE_SPECIAL_HANDLING                                          21:20
+#define NVC37D_UPDATE_SPECIAL_HANDLING_NONE                                     (0x00000000)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM                             (0x00000001)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH                              (0x00000002)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_REASON                                   19:12
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS                                        24:24
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_FALSE                                  (0x00000000)
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_TRUE                                   (0x00000001)
+#define NVC37D_SET_CONTEXT_DMA_NOTIFIER                                         (0x00000208)
+#define NVC37D_SET_CONTEXT_DMA_NOTIFIER_HANDLE                                  31:0
+#define NVC37D_SET_NOTIFIER_CONTROL                                             (0x0000020C)
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE                                        0:0
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE                                  (0x00000000)
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN                           (0x00000001)
+#define NVC37D_SET_NOTIFIER_CONTROL_OFFSET                                      11:4
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY                                      12:12
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE                              (0x00000000)
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE                               (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS                                              (0x00000218)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i)                     ((i)+0):((i)+0)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1                8
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE                (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE                 (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0                       0:0
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1                       1:1
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2                       2:2
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3                       3:3
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4                       4:4
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5                       5:5
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6                       6:6
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7                       7:7
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE               (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE                (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE                          16:16
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE                  (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE                   (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS                                       (0x0000021C)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i)              ((i)+0):((i)+0)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1         32
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE         (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE          (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0                0:0
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1                1:1
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2                2:2
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3                3:3
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4                4:4
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5                5:5
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6                6:6
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7                7:7
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8                8:8
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9                9:9
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE        (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE         (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10               10:10
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11               11:11
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12               12:12
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13               13:13
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14               14:14
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15               15:15
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16               16:16
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17               17:17
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18               18:18
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19               19:19
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20               20:20
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21               21:21
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22               22:22
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23               23:23
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24               24:24
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25               25:25
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26               26:26
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27               27:27
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28               28:28
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29               29:29
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30               30:30
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE        (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31               31:31
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE       (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE        (0x00000001)
+
+#define NVC37D_SOR_SET_CONTROL(a)                                               (0x00000300 + (a)*0x00000020)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK                                       7:0
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_NONE                                  (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD0                                 (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD1                                 (0x00000002)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD2                                 (0x00000004)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD3                                 (0x00000008)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD4                                 (0x00000010)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD5                                 (0x00000020)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD6                                 (0x00000040)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD7                                 (0x00000080)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL                                         11:8
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM                             (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A                           (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B                           (0x00000002)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS                               (0x00000005)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A                                    (0x00000008)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B                                    (0x00000009)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DSI                                     (0x0000000A)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_CUSTOM                                  (0x0000000F)
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY                                 16:16
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE                   (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE                   (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE                             21:20
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF                         (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2                          (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4                          (0x00000002)
+
+#define NVC37D_WINDOW_SET_CONTROL(a)                                            (0x00001000 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER                                         3:0
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD(i)                                 (0x00000000 +(i))
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1                            8
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD0                                   (0x00000000)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD1                                   (0x00000001)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD2                                   (0x00000002)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD3                                   (0x00000003)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD4                                   (0x00000004)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD5                                   (0x00000005)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD6                                   (0x00000006)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD7                                   (0x00000007)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_NONE                                    (0x0000000F)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a)                         (0x00001004 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP             0:0
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE       (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE        (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP             1:1
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE       (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE        (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP             2:2
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE       (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE        (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP             3:3
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE       (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE        (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422              4:4
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE        (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE         (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420              5:5
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE        (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE         (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444              6:6
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE        (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE         (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420         7:7
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE   (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE    (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422         8:8
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE   (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE    (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R        9:9
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE  (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE   (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444         10:10
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE   (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE    (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420          11:11
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE    (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE     (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444          12:12
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE    (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE     (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420     13:13
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422     14:14
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R    15:15
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444     16:16
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a)                 (0x00001008 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP     0:0
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP     1:1
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP     2:2
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP     3:3
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422      4:4
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420      5:5
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444      6:6
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420  11:11
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444  12:12
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a)                                (0x00001010 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE       14:0
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT                         17:16
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_NONE              (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_257               (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_1025              (0x00000002)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS                 22:20
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2          (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5          (0x00000004)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED                 24:24
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE           (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE            (0x00000001)
+
+#define NVC37D_HEAD_SET_PROCAMP(a)                                              (0x00002000 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE                                     1:0
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB                                 (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601                             (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709                             (0x00000002)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020                            (0x00000003)
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF                                      3:3
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE                              (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE                               (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_SAT_COS                                         15:4
+#define NVC37D_HEAD_SET_PROCAMP_SAT_SINE                                        27:16
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE                                   28:28
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA                              (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA                               (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION                               29:29
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE                       (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE                        (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL                                     31:30
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_AUTO                                (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_VIDEO                               (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_GRAPHICS                            (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a)                              (0x00002004 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE                        1:0
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER          (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER        (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER      (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY                  2:2
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY                  3:3
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH                     7:4
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422          (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444          (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422          (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422          (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444          (0x00000004)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444          (0x00000005)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422          (0x00000006)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444          (0x00000007)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444          (0x00000008)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE            24:24
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE    (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE     (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG                23:12
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a)                                (0x0000200C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ                             30:0
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001                    31:31
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE              (0x00000000)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE               (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL(a)                                       (0x00002018 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE                                   0:0
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE                           (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE                            (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS                                     5:4
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS                           (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS                           (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS                          (0x00000002)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS                          (0x00000003)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE                            2:2
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE                    (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE                     (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE                                     10:8
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC                     (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC                      (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2                         (0x00000002)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2                          (0x00000003)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL                            (0x00000004)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_PHASE                                    13:12
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a)                            (0x00002028 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ                         30:0
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001                31:31
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE          (0x00000000)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE           (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(a)                                    (0x00002030 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR                                2:0
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE                     (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32                  (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64                  (0x00000002)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128                (0x00000003)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256                (0x00000004)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT                            5:4
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE                 (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257                  (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025                 (0x00000002)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED                     8:8
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE               (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE                (0x00000001)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(a)                                     (0x0000204C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH                                  14:0
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT                                 30:16
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(a)                                    (0x00002058 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH                                 14:0
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT                                30:16
+#define NVC37D_HEAD_SET_RASTER_SIZE(a)                                          (0x00002064 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_SIZE_WIDTH                                       14:0
+#define NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT                                      30:16
+#define NVC37D_HEAD_SET_RASTER_SYNC_END(a)                                      (0x00002068 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_SYNC_END_X                                       14:0
+#define NVC37D_HEAD_SET_RASTER_SYNC_END_Y                                       30:16
+#define NVC37D_HEAD_SET_RASTER_BLANK_END(a)                                     (0x0000206C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_BLANK_END_X                                      14:0
+#define NVC37D_HEAD_SET_RASTER_BLANK_END_Y                                      30:16
+#define NVC37D_HEAD_SET_RASTER_BLANK_START(a)                                   (0x00002070 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_BLANK_START_X                                    14:0
+#define NVC37D_HEAD_SET_RASTER_BLANK_START_Y                                    30:16
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b)                                 (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE                               31:0
+#define NVC37D_HEAD_SET_OFFSET_CURSOR(a,b)                                      (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC37D_HEAD_SET_OFFSET_CURSOR_ORIGIN                                    31:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR(a)                                       (0x0000209C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE                                   31:31
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE                           (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE                            (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT                                   7:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5                          (0x000000E9)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8                          (0x000000CF)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE                                     9:8
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32                             (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64                             (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128                           (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256                           (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X                               19:12
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y                               27:20
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA                                 29:28
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_NONE                            (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_SRGB                            (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV8_10                         (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV12                           (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a)                           (0x000020A0 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1                           7:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT   11:8
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE                         16:16
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND                   (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR                     (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(a)                                   (0x000020A4 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE                                 1:0
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257                        (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025                       (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE                                5:4
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY                          (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XRBIAS                         (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XVYCC                          (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE                          9:8
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INDEX                    (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE              (0x00000001)
+#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(a)                                    (0x000020A8 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT_ORIGIN                                31:0
+#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(a)                               (0x000020AC + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT_HANDLE                           31:0
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC(a)                                      (0x00002180 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE                                  31:0
+#define NVC37D_HEAD_SET_CRC_CONTROL(a)                                          (0x00002184 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL                         4:0
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE                      8:8
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE                (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE                 (0x00000001)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC                                 19:12
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE                            (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF                              (0x00000030)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i)                          (0x00000050 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1                     8
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0                            (0x00000050)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1                            (0x00000051)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2                            (0x00000052)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3                            (0x00000053)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4                            (0x00000054)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5                            (0x00000055)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6                            (0x00000056)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7                            (0x00000057)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(i)                         (0x00000060 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR__SIZE_1                    4
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR0                           (0x00000060)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR1                           (0x00000061)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR2                           (0x00000062)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR3                           (0x00000063)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC                               27:20
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE                          (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF                            (0x00000030)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i)                        (0x00000050 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1                   8
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0                          (0x00000050)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1                          (0x00000051)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2                          (0x00000052)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3                          (0x00000053)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4                          (0x00000054)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5                          (0x00000055)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6                          (0x00000056)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7                          (0x00000057)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR(i)                       (0x00000060 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR__SIZE_1                  4
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR0                         (0x00000060)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR1                         (0x00000061)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR2                         (0x00000062)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR3                         (0x00000063)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE                           9:9
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE                   (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE                    (0x00000001)
+#endif // _clC37d_h
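The field defines in these class headers use NVIDIA's high:low bit-range notation: each field macro expands to a pair such as 7:4, chosen so that placing it inside a ternary expression yields either endpoint ((1 ? 7:4) evaluates to 7, (0 ? 7:4) to 4). The following is a minimal, hypothetical sketch of how such ranges can be turned into shifts and masks; it is not the driver's actual helper macros.

/* Hypothetical sketch only -- not nouveau's real helpers. */
#define FLD_LO(f)      (0 ? f)   /* low bit of a high:low range  */
#define FLD_HI(f)      (1 ? f)   /* high bit of a high:low range */
#define FLD_MASK(f)    (((~0u) >> (31 - FLD_HI(f))) & ((~0u) << FLD_LO(f)))
#define FLD_VAL(f, v)  (((unsigned int)(v) << FLD_LO(f)) & FLD_MASK(f))

/* Example: pack the data word for NVC37D_HEAD_SET_RASTER_SIZE(head),
 * whose WIDTH field occupies bits 14:0 and HEIGHT bits 30:16. */
static inline unsigned int head_raster_size_data(unsigned int w, unsigned int h)
{
	return FLD_VAL(NVC37D_HEAD_SET_RASTER_SIZE_WIDTH, w) |
	       FLD_VAL(NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT, h);
}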
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h
new file mode 100644
index 0000000..99e5a73
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37e_h_
+#define _clC37e_h_
+
+// class methods
+#define NVC37E_UPDATE                                                           (0x00000200)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM                                    12:12
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE                            (0x00000000)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE                             (0x00000001)
+#define NVC37E_SET_SEMAPHORE_CONTROL                                            (0x0000020C)
+#define NVC37E_SET_SEMAPHORE_CONTROL_OFFSET                                     7:0
+#define NVC37E_SET_SEMAPHORE_ACQUIRE                                            (0x00000210)
+#define NVC37E_SET_SEMAPHORE_ACQUIRE_VALUE                                      31:0
+#define NVC37E_SET_SEMAPHORE_RELEASE                                            (0x00000214)
+#define NVC37E_SET_SEMAPHORE_RELEASE_VALUE                                      31:0
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE                                        (0x00000218)
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE                                 31:0
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER                                         (0x0000021C)
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER_HANDLE                                  31:0
+#define NVC37E_SET_NOTIFIER_CONTROL                                             (0x00000220)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE                                        0:0
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE                                  (0x00000000)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN                           (0x00000001)
+#define NVC37E_SET_NOTIFIER_CONTROL_OFFSET                                      11:4
+#define NVC37E_SET_SIZE                                                         (0x00000224)
+#define NVC37E_SET_SIZE_WIDTH                                                   15:0
+#define NVC37E_SET_SIZE_HEIGHT                                                  31:16
+#define NVC37E_SET_STORAGE                                                      (0x00000228)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT                                         3:0
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB                (0x00000000)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS               (0x00000001)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS              (0x00000002)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS             (0x00000003)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS           (0x00000004)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS         (0x00000005)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT                                        4:4
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                            (0x00000000)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_PITCH                                  (0x00000001)
+#define NVC37E_SET_PARAMS                                                       (0x0000022C)
+#define NVC37E_SET_PARAMS_FORMAT                                                7:0
+#define NVC37E_SET_PARAMS_FORMAT_I8                                             (0x0000001E)
+#define NVC37E_SET_PARAMS_FORMAT_R4G4B4A4                                       (0x0000002F)
+#define NVC37E_SET_PARAMS_FORMAT_R5G6B5                                         (0x000000E8)
+#define NVC37E_SET_PARAMS_FORMAT_A1R5G5B5                                       (0x000000E9)
+#define NVC37E_SET_PARAMS_FORMAT_R5G5B5A1                                       (0x0000002E)
+#define NVC37E_SET_PARAMS_FORMAT_A8R8G8B8                                       (0x000000CF)
+#define NVC37E_SET_PARAMS_FORMAT_X8R8G8B8                                       (0x000000E6)
+#define NVC37E_SET_PARAMS_FORMAT_A8B8G8R8                                       (0x000000D5)
+#define NVC37E_SET_PARAMS_FORMAT_X8B8G8R8                                       (0x000000F9)
+#define NVC37E_SET_PARAMS_FORMAT_A2R10G10B10                                    (0x000000DF)
+#define NVC37E_SET_PARAMS_FORMAT_A2B10G10R10                                    (0x000000D1)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS                          (0x00000022)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC                           (0x00000024)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS                         (0x00000023)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16                                (0x000000C6)
+#define NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                            (0x000000CA)
+#define NVC37E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422                              (0x00000028)
+#define NVC37E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422                              (0x00000029)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N444                                 (0x00000035)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422                                 (0x00000036)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422R                                (0x00000037)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___V8U8_N420                                 (0x00000038)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N444                              (0x0000003A)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N420                              (0x0000003B)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N444                              (0x00000055)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422                              (0x00000056)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422R                             (0x00000057)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___V10U10_N420                              (0x00000058)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N444                           (0x0000005A)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N420                           (0x0000005B)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N444                              (0x00000075)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422                              (0x00000076)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422R                             (0x00000077)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___V12U12_N420                              (0x00000078)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N444                           (0x0000007A)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N420                           (0x0000007B)
+#define NVC37E_SET_PARAMS_COLOR_SPACE                                           9:8
+#define NVC37E_SET_PARAMS_COLOR_SPACE_RGB                                       (0x00000000)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_601                                   (0x00000001)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_709                                   (0x00000002)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_2020                                  (0x00000003)
+#define NVC37E_SET_PARAMS_INPUT_RANGE                                           13:12
+#define NVC37E_SET_PARAMS_INPUT_RANGE_BYPASS                                    (0x00000000)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_LIMITED                                   (0x00000001)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_FULL                                      (0x00000002)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE                                        16:16
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_DISABLE                                (0x00000000)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_ENABLE                                 (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA                                              21:20
+#define NVC37E_SET_PARAMS_DE_GAMMA_NONE                                         (0x00000000)
+#define NVC37E_SET_PARAMS_DE_GAMMA_SRGB                                         (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV8_10                                      (0x00000002)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV12                                        (0x00000003)
+#define NVC37E_SET_PARAMS_CSC                                                   17:17
+#define NVC37E_SET_PARAMS_CSC_DISABLE                                           (0x00000000)
+#define NVC37E_SET_PARAMS_CSC_ENABLE                                            (0x00000001)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND                                    18:18
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE                            (0x00000000)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE                             (0x00000001)
+#define NVC37E_SET_PARAMS_SWAP_UV                                               19:19
+#define NVC37E_SET_PARAMS_SWAP_UV_DISABLE                                       (0x00000000)
+#define NVC37E_SET_PARAMS_SWAP_UV_ENABLE                                        (0x00000001)
+#define NVC37E_SET_PLANAR_STORAGE(b)                                            (0x00000230 + (b)*0x00000004)
+#define NVC37E_SET_PLANAR_STORAGE_PITCH                                         12:0
+#define NVC37E_SET_CONTEXT_DMA_ISO(b)                                           (0x00000240 + (b)*0x00000004)
+#define NVC37E_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NVC37E_SET_OFFSET(b)                                                    (0x00000260 + (b)*0x00000004)
+#define NVC37E_SET_OFFSET_ORIGIN                                                31:0
+#define NVC37E_SET_POINT_IN(b)                                                  (0x00000290 + (b)*0x00000004)
+#define NVC37E_SET_POINT_IN_X                                                   15:0
+#define NVC37E_SET_POINT_IN_Y                                                   31:16
+#define NVC37E_SET_SIZE_IN                                                      (0x00000298)
+#define NVC37E_SET_SIZE_IN_WIDTH                                                14:0
+#define NVC37E_SET_SIZE_IN_HEIGHT                                               30:16
+#define NVC37E_SET_SIZE_OUT                                                     (0x000002A4)
+#define NVC37E_SET_SIZE_OUT_WIDTH                                               14:0
+#define NVC37E_SET_SIZE_OUT_HEIGHT                                              30:16
+#define NVC37E_SET_CONTROL_INPUT_LUT                                            (0x000002B0)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE                                       1:0
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257                              (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025                             (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE                                      5:4
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY                                (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XRBIAS                               (0x00000001)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XVYCC                                (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE                                9:8
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INDEX                          (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE                    (0x00000001)
+#define NVC37E_SET_OFFSET_INPUT_LUT                                             (0x000002B4)
+#define NVC37E_SET_OFFSET_INPUT_LUT_ORIGIN                                      31:0
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT                                        (0x000002B8)
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT_HANDLE                                 31:0
+#define NVC37E_SET_CSC_RED2RED                                                  (0x000002BC)
+#define NVC37E_SET_CSC_RED2RED_COEFF                                            18:0
+#define NVC37E_SET_CSC_GREEN2RED                                                (0x000002C0)
+#define NVC37E_SET_CSC_GREEN2RED_COEFF                                          18:0
+#define NVC37E_SET_CSC_BLUE2RED                                                 (0x000002C4)
+#define NVC37E_SET_CSC_BLUE2RED_COEFF                                           18:0
+#define NVC37E_SET_CSC_CONSTANT2RED                                             (0x000002C8)
+#define NVC37E_SET_CSC_CONSTANT2RED_COEFF                                       18:0
+#define NVC37E_SET_CSC_RED2GREEN                                                (0x000002CC)
+#define NVC37E_SET_CSC_RED2GREEN_COEFF                                          18:0
+#define NVC37E_SET_CSC_GREEN2GREEN                                              (0x000002D0)
+#define NVC37E_SET_CSC_GREEN2GREEN_COEFF                                        18:0
+#define NVC37E_SET_CSC_BLUE2GREEN                                               (0x000002D4)
+#define NVC37E_SET_CSC_BLUE2GREEN_COEFF                                         18:0
+#define NVC37E_SET_CSC_CONSTANT2GREEN                                           (0x000002D8)
+#define NVC37E_SET_CSC_CONSTANT2GREEN_COEFF                                     18:0
+#define NVC37E_SET_CSC_RED2BLUE                                                 (0x000002DC)
+#define NVC37E_SET_CSC_RED2BLUE_COEFF                                           18:0
+#define NVC37E_SET_CSC_GREEN2BLUE                                               (0x000002E0)
+#define NVC37E_SET_CSC_GREEN2BLUE_COEFF                                         18:0
+#define NVC37E_SET_CSC_BLUE2BLUE                                                (0x000002E4)
+#define NVC37E_SET_CSC_BLUE2BLUE_COEFF                                          18:0
+#define NVC37E_SET_CSC_CONSTANT2BLUE                                            (0x000002E8)
+#define NVC37E_SET_CSC_CONSTANT2BLUE_COEFF                                      18:0
+#define NVC37E_SET_COMPOSITION_CONTROL                                          (0x000002EC)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT                         1:0
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE                 (0x00000000)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC                     (0x00000001)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST                     (0x00000002)
+#define NVC37E_SET_COMPOSITION_CONTROL_DEPTH                                    11:4
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA                                   (0x000002F0)
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K1                                7:0
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K2                                15:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT                                    (0x000002F4)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT      3:0
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE  (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1   (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT   7:4
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT      11:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE  (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1   (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2   (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT   15:12
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT      19:16
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1   (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2   (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT   23:20
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT      27:24
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE  (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2   (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT   31:28
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_KEY_ALPHA                                                    (0x000002F8)
+#define NVC37E_SET_KEY_ALPHA_MIN                                                15:0
+#define NVC37E_SET_KEY_ALPHA_MAX                                                31:16
+#define NVC37E_SET_KEY_RED_CR                                                   (0x000002FC)
+#define NVC37E_SET_KEY_RED_CR_MIN                                               15:0
+#define NVC37E_SET_KEY_RED_CR_MAX                                               31:16
+#define NVC37E_SET_KEY_GREEN_Y                                                  (0x00000300)
+#define NVC37E_SET_KEY_GREEN_Y_MIN                                              15:0
+#define NVC37E_SET_KEY_GREEN_Y_MAX                                              31:16
+#define NVC37E_SET_KEY_BLUE_CB                                                  (0x00000304)
+#define NVC37E_SET_KEY_BLUE_CB_MIN                                              15:0
+#define NVC37E_SET_KEY_BLUE_CB_MAX                                              31:16
+#define NVC37E_SET_PRESENT_CONTROL                                              (0x00000308)
+#define NVC37E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         3:0
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE                                   6:4
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING                       (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE                         (0x00000001)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE                               8:8
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE                       (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE                        (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS                                              (0x00000370)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE                          0:0
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE                  (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE                   (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i)                     ((i)+1):((i)+1)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1                8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE                (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE                 (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0                       1:1
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1                       2:2
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2                       3:3
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3                       4:4
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4                       5:5
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5                       6:6
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6                       7:7
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE                (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7                       8:8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE               (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE                (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS                                       (0x00000374)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i)              ((i)+0):((i)+0)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1         32
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE         (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE          (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0                0:0
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1                1:1
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2                2:2
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3                3:3
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4                4:4
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5                5:5
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6                6:6
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7                7:7
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8                8:8
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9                9:9
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE        (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE         (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10               10:10
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11               11:11
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12               12:12
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13               13:13
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14               14:14
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15               15:15
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16               16:16
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17               17:17
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18               18:18
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19               19:19
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20               20:20
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21               21:21
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22               22:22
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23               23:23
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24               24:24
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25               25:25
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26               26:26
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27               27:27
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28               28:28
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29               29:29
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30               30:30
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE        (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31               31:31
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE       (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE        (0x00000001)
+#endif // _clC37e_h
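Method addresses in these classes are parameterised by unit index: in the core class (clC37d) the (a) argument selects the head with a 0x400-byte stride, while in the window class above the (b) argument selects the surface plane with a 4-byte stride (for example NVC37E_SET_PLANAR_STORAGE(b)). A brief, hypothetical sketch of emitting such a method, assuming a push_method() helper (not part of the driver) that queues one method/data pair:

/* Hypothetical helper -- the real driver uses its own push-buffer API. */
void push_method(unsigned int mthd, unsigned int data);

/* Program the pitches of planes 0 and 1 of a semi-planar surface;
 * NVC37E_SET_PLANAR_STORAGE_PITCH occupies bits 12:0 of the data word. */
static void set_planar_pitches(unsigned int pitch0, unsigned int pitch1)
{
	push_method(NVC37E_SET_PLANAR_STORAGE(0), pitch0 & 0x00001fff);
	push_method(NVC37E_SET_PLANAR_STORAGE(1), pitch1 & 0x00001fff);
}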
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
new file mode 100644
index 0000000..d83ac81
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57d_h_
+#define _clC57d_h_
+
+// class methods
+#define NVC57D_SET_CONTEXT_DMA_NOTIFIER                                         (0x00000208)
+#define NVC57D_SET_CONTEXT_DMA_NOTIFIER_HANDLE                                  31:0
+
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a)                         (0x00001004 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP             0:0
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE       (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE        (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP             1:1
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE       (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE        (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP             2:2
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE       (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE        (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP             3:3
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE       (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE        (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422              4:4
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE        (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE         (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420              5:5
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE        (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE         (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444              6:6
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE        (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE         (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420         7:7
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE   (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE    (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422         8:8
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE   (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE    (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R        9:9
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE  (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE   (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444         10:10
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE   (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE    (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420          11:11
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE    (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE     (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444          12:12
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE    (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE     (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420     13:13
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422     14:14
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R    15:15
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444     16:16
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a)                 (0x00001008 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP     0:0
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP     1:1
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP     2:2
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP     3:3
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422      4:4
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420      5:5
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444      6:6
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420  11:11
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444  12:12
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a)                                (0x00001010 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE       14:0
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED                      16:16
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE                (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE                 (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED                   28:28
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE             (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE              (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS                 22:20
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2          (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5          (0x00000004)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED                 24:24
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE           (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE            (0x00000001)
+
+#define NVC57D_HEAD_SET_PROCAMP(a)                                              (0x00002000 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE                                     1:0
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB                                 (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601                             (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709                             (0x00000002)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020                            (0x00000003)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF                                      3:3
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE                              (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE                               (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE                                   28:28
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA                              (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA                               (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a)                              (0x00002004 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE                        1:0
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER          (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER        (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER      (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY                  2:2
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY                  3:3
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE    (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE    (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH                     7:4
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422          (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444          (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422          (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422          (0x00000003)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444          (0x00000004)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444          (0x00000005)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422          (0x00000006)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444          (0x00000007)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444          (0x00000008)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE            24:24
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE    (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE     (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG                23:12
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN                  31:26
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0             (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1             (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2             (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3             (0x00000003)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4             (0x00000004)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5             (0x00000005)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6             (0x00000006)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7             (0x00000007)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8             (0x00000008)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9             (0x00000009)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10            (0x0000000A)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11            (0x0000000B)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12            (0x0000000C)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13            (0x0000000D)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14            (0x0000000E)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15            (0x0000000F)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16            (0x00000010)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17            (0x00000011)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18            (0x00000012)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19            (0x00000013)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20            (0x00000014)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21            (0x00000015)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22            (0x00000016)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23            (0x00000017)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24            (0x00000018)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25            (0x00000019)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26            (0x0000001A)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27            (0x0000001B)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28            (0x0000001C)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29            (0x0000001D)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30            (0x0000001E)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31            (0x0000001F)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE             (0x0000003F)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a)                                (0x0000200C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ                             30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001                    31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE              (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE               (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a)                            (0x0000201C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER                    0:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE              (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE               (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING                       4:4
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE               (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE                (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE                  9:8
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK           (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK           (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a)                            (0x00002028 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ                         30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001                31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE          (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE           (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(a)                                    (0x00002030 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR                                2:0
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE                     (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32                  (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64                  (0x00000002)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128                (0x00000003)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256                (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED                          4:4
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE                    (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE                     (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS                    14:12
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2             (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5             (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED                     8:8
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE               (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE                (0x00000001)
+#define NVC57D_HEAD_SET_RASTER_SIZE(a)                                          (0x00002064 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SIZE_WIDTH                                       14:0
+#define NVC57D_HEAD_SET_RASTER_SIZE_HEIGHT                                      30:16
+#define NVC57D_HEAD_SET_RASTER_SYNC_END(a)                                      (0x00002068 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_X                                       14:0
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_Y                                       30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_END(a)                                     (0x0000206C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_X                                      14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_Y                                      30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_START(a)                                   (0x00002070 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_X                                    14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y                                    30:16
+#define NVC57D_HEAD_SET_OLUT_CONTROL(a)                                         (0x00002280 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE                                0:0
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE                        (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE                         (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR                                     1:1
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE                             (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE                              (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE                                       3:2
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED                             (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8                               (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10                              (0x00000002)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_SIZE                                       18:8
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(a)                                   (0x00002284 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE                                31:0
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(a)                                     (0x00002288 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE                                 31:0
+#define NVC57D_HEAD_SET_OFFSET_OLUT(a)                                          (0x0000228C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OFFSET_OLUT_ORIGIN                                      31:0
+#endif // _clC57d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h
new file mode 100644 (file)
index 0000000..850d16f
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57e_h_
+#define _clC57e_h_
+
+// class methods
+#define NVC57E_SET_SIZE                                                         (0x00000224)
+#define NVC57E_SET_SIZE_WIDTH                                                   15:0
+#define NVC57E_SET_SIZE_HEIGHT                                                  31:16
+#define NVC57E_SET_STORAGE                                                      (0x00000228)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT                                         3:0
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB                (0x00000000)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS               (0x00000001)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS              (0x00000002)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS             (0x00000003)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS           (0x00000004)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS         (0x00000005)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT                                        4:4
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR                            (0x00000000)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_PITCH                                  (0x00000001)
+#define NVC57E_SET_PARAMS                                                       (0x0000022C)
+#define NVC57E_SET_PARAMS_FORMAT                                                7:0
+#define NVC57E_SET_PARAMS_FORMAT_I8                                             (0x0000001E)
+#define NVC57E_SET_PARAMS_FORMAT_R4G4B4A4                                       (0x0000002F)
+#define NVC57E_SET_PARAMS_FORMAT_R5G6B5                                         (0x000000E8)
+#define NVC57E_SET_PARAMS_FORMAT_A1R5G5B5                                       (0x000000E9)
+#define NVC57E_SET_PARAMS_FORMAT_R5G5B5A1                                       (0x0000002E)
+#define NVC57E_SET_PARAMS_FORMAT_A8R8G8B8                                       (0x000000CF)
+#define NVC57E_SET_PARAMS_FORMAT_X8R8G8B8                                       (0x000000E6)
+#define NVC57E_SET_PARAMS_FORMAT_A8B8G8R8                                       (0x000000D5)
+#define NVC57E_SET_PARAMS_FORMAT_X8B8G8R8                                       (0x000000F9)
+#define NVC57E_SET_PARAMS_FORMAT_A2R10G10B10                                    (0x000000DF)
+#define NVC57E_SET_PARAMS_FORMAT_A2B10G10R10                                    (0x000000D1)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS                          (0x00000022)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC                           (0x00000024)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS                         (0x00000023)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16                                (0x000000C6)
+#define NVC57E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16                            (0x000000CA)
+#define NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422                              (0x00000028)
+#define NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422                              (0x00000029)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444                                 (0x00000035)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422                                 (0x00000036)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420                                 (0x00000038)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444                              (0x00000055)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422                              (0x00000056)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420                              (0x00000058)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444                              (0x00000075)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422                              (0x00000076)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420                              (0x00000078)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND                                    18:18
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE                            (0x00000000)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE                             (0x00000001)
+#define NVC57E_SET_PARAMS_SWAP_UV                                               19:19
+#define NVC57E_SET_PARAMS_SWAP_UV_DISABLE                                       (0x00000000)
+#define NVC57E_SET_PARAMS_SWAP_UV_ENABLE                                        (0x00000001)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE                                     22:22
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST                    (0x00000000)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN                          (0x00000001)
+#define NVC57E_SET_PLANAR_STORAGE(b)                                            (0x00000230 + (b)*0x00000004)
+#define NVC57E_SET_PLANAR_STORAGE_PITCH                                         12:0
+#define NVC57E_SET_CONTEXT_DMA_ISO(b)                                           (0x00000240 + (b)*0x00000004)
+#define NVC57E_SET_CONTEXT_DMA_ISO_HANDLE                                       31:0
+#define NVC57E_SET_OFFSET(b)                                                    (0x00000260 + (b)*0x00000004)
+#define NVC57E_SET_OFFSET_ORIGIN                                                31:0
+#define NVC57E_SET_POINT_IN(b)                                                  (0x00000290 + (b)*0x00000004)
+#define NVC57E_SET_POINT_IN_X                                                   15:0
+#define NVC57E_SET_POINT_IN_Y                                                   31:16
+#define NVC57E_SET_SIZE_IN                                                      (0x00000298)
+#define NVC57E_SET_SIZE_IN_WIDTH                                                15:0
+#define NVC57E_SET_SIZE_IN_HEIGHT                                               31:16
+#define NVC57E_SET_SIZE_OUT                                                     (0x000002A4)
+#define NVC57E_SET_SIZE_OUT_WIDTH                                               15:0
+#define NVC57E_SET_SIZE_OUT_HEIGHT                                              31:16
+#define NVC57E_SET_PRESENT_CONTROL                                              (0x00000308)
+#define NVC57E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL                         3:0
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE                                   6:4
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING                       (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE                         (0x00000001)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE                               8:8
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE                       (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE                        (0x00000001)
+#define NVC57E_SET_FMT_COEFFICIENT_C00                                          (0x00000400)
+#define NVC57E_SET_FMT_COEFFICIENT_C00_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C01                                          (0x00000404)
+#define NVC57E_SET_FMT_COEFFICIENT_C01_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C02                                          (0x00000408)
+#define NVC57E_SET_FMT_COEFFICIENT_C02_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C03                                          (0x0000040C)
+#define NVC57E_SET_FMT_COEFFICIENT_C03_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C10                                          (0x00000410)
+#define NVC57E_SET_FMT_COEFFICIENT_C10_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C11                                          (0x00000414)
+#define NVC57E_SET_FMT_COEFFICIENT_C11_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C12                                          (0x00000418)
+#define NVC57E_SET_FMT_COEFFICIENT_C12_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C13                                          (0x0000041C)
+#define NVC57E_SET_FMT_COEFFICIENT_C13_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C20                                          (0x00000420)
+#define NVC57E_SET_FMT_COEFFICIENT_C20_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C21                                          (0x00000424)
+#define NVC57E_SET_FMT_COEFFICIENT_C21_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C22                                          (0x00000428)
+#define NVC57E_SET_FMT_COEFFICIENT_C22_VALUE                                    20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C23                                          (0x0000042C)
+#define NVC57E_SET_FMT_COEFFICIENT_C23_VALUE                                    20:0
+#define NVC57E_SET_ILUT_CONTROL                                                 (0x00000440)
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE                                     0:0
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE                             (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE                              (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MIRROR                                          1:1
+#define NVC57E_SET_ILUT_CONTROL_MIRROR_DISABLE                                  (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_MIRROR_ENABLE                                   (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MODE                                            3:2
+#define NVC57E_SET_ILUT_CONTROL_MODE_SEGMENTED                                  (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8                                    (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10                                   (0x00000002)
+#define NVC57E_SET_ILUT_CONTROL_SIZE                                            18:8
+#define NVC57E_SET_CONTEXT_DMA_ILUT                                             (0x00000444)
+#define NVC57E_SET_CONTEXT_DMA_ILUT_HANDLE                                      31:0
+#define NVC57E_SET_OFFSET_ILUT                                                  (0x00000448)
+#define NVC57E_SET_OFFSET_ILUT_ORIGIN                                           31:0
+#endif // _clC57e_h
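The window-class methods above follow two conventions worth noting: per-plane methods take an index that is folded into the method offset, and every field is described with NVIDIA's high:low bit-range notation, which the DRF helpers added in the next file decode generically. A hand-expanded sketch under those definitions (the 1920x1080 values are purely illustrative):

  /* Indexed method: offset = base + index * stride. */
  u32 mthd = NVC57E_SET_PLANAR_STORAGE(1);  /* 0x00000230 + 1 * 0x00000004 = 0x00000234 */
  /* Packed fields: HEIGHT occupies bits 31:16, WIDTH bits 15:0 of SET_SIZE. */
  u32 size = (1080 << 16) | 1920;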
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/drf.h b/drivers/gpu/drm/nouveau/include/nvhw/drf.h
new file mode 100644 (file)
index 0000000..bd0fc41
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVHW_DRF_H__
+#define __NVHW_DRF_H__
+
+/* Helpers common to all DRF accessors. */
+#define DRF_LO(drf)    (0 ? drf)
+#define DRF_HI(drf)    (1 ? drf)
+#define DRF_BITS(drf)  (DRF_HI(drf) - DRF_LO(drf) + 1)
+#define DRF_MASK(drf)  (~0ULL >> (64 - DRF_BITS(drf)))
+#define DRF_SMASK(drf) (DRF_MASK(drf) << DRF_LO(drf))
+
+/* Helpers for DRF-MW accessors. */
+#define DRF_MX_MW(drf)      drf
+#define DRF_MX(drf)         DRF_MX_##drf
+#define DRF_MW(drf)         DRF_MX(drf)
+#define DRF_MW_SPANS(o,drf) (DRF_LW_IDX((o),drf) != DRF_HW_IDX((o),drf))
+#define DRF_MW_SIZE(o)      (sizeof((o)[0]) * 8)
+
+#define DRF_LW_IDX(o,drf)   (DRF_LO(DRF_MW(drf)) / DRF_MW_SIZE(o))
+#define DRF_LW_LO(o,drf)    (DRF_LO(DRF_MW(drf)) % DRF_MW_SIZE(o))
+#define DRF_LW_HI(o,drf)    (DRF_MW_SPANS((o),drf) ? (DRF_MW_SIZE(o) - 1) : DRF_HW_HI((o),drf))
+#define DRF_LW_BITS(o,drf)  (DRF_LW_HI((o),drf) - DRF_LW_LO((o),drf) + 1)
+#define DRF_LW_MASK(o,drf)  (~0ULL >> (64 - DRF_LW_BITS((o),drf)))
+#define DRF_LW_SMASK(o,drf) (DRF_LW_MASK((o),drf) << DRF_LW_LO((o),drf))
+#define DRF_LW_GET(o,drf)   (((o)[DRF_LW_IDX((o),drf)] >> DRF_LW_LO((o),drf)) & DRF_LW_MASK((o),drf))
+#define DRF_LW_VAL(o,drf,v) (((v) & DRF_LW_MASK((o),drf)) << DRF_LW_LO((o),drf))
+#define DRF_LW_CLR(o,drf)   ((o)[DRF_LW_IDX((o),drf)] & ~DRF_LW_SMASK((o),drf))
+#define DRF_LW_SET(o,drf,v) (DRF_LW_CLR((o),drf) | DRF_LW_VAL((o),drf,(v)))
+
+#define DRF_HW_IDX(o,drf)   (DRF_HI(DRF_MW(drf)) / DRF_MW_SIZE(o))
+#define DRF_HW_LO(o,drf)    0
+#define DRF_HW_HI(o,drf)    (DRF_HI(DRF_MW(drf)) % DRF_MW_SIZE(o))
+#define DRF_HW_BITS(o,drf)  (DRF_HW_HI((o),drf) - DRF_HW_LO((o),drf) + 1)
+#define DRF_HW_MASK(o,drf)  (~0ULL >> (64 - DRF_HW_BITS((o),drf)))
+#define DRF_HW_SMASK(o,drf) (DRF_HW_MASK((o),drf) << DRF_HW_LO((o),drf))
+#define DRF_HW_GET(o,drf)   ((o)[DRF_HW_IDX(o,drf)] & DRF_HW_SMASK((o),drf))
+#define DRF_HW_VAL(o,drf,v) (((long long)(v) >> DRF_LW_BITS((o),drf)) & DRF_HW_SMASK((o),drf))
+#define DRF_HW_CLR(o,drf)   ((o)[DRF_HW_IDX((o),drf)] & ~DRF_HW_SMASK((o),drf))
+#define DRF_HW_SET(o,drf,v) (DRF_HW_CLR((o),drf) | DRF_HW_VAL((o),drf,(v)))
+
+/* DRF accessors. */
+#define NVVAL_X(drf,v) (((v) & DRF_MASK(drf)) << DRF_LO(drf))
+#define NVVAL_N(X,d,r,f,  v) NVVAL_X(d##_##r##_##f, (v))
+#define NVVAL_I(X,d,r,f,i,v) NVVAL_X(d##_##r##_##f(i), (v))
+#define NVVAL_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL(A...) NVVAL_(X, ##A, NVVAL_I, NVVAL_N)(X, ##A)
+
+#define NVDEF_N(X,d,r,f,  v) NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)
+#define NVDEF_I(X,d,r,f,i,v) NVVAL_X(d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVDEF(A...) NVDEF_(X, ##A, NVDEF_I, NVDEF_N)(X, ##A)
+
+#define NVVAL_GET_X(o,drf) (((o) >> DRF_LO(drf)) & DRF_MASK(drf))
+#define NVVAL_GET_N(X,o,d,r,f  ) NVVAL_GET_X(o, d##_##r##_##f)
+#define NVVAL_GET_I(X,o,d,r,f,i) NVVAL_GET_X(o, d##_##r##_##f(i))
+#define NVVAL_GET_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL_GET(A...) NVVAL_GET_(X, ##A, NVVAL_GET_I, NVVAL_GET_N)(X, ##A)
+
+#define NVVAL_TEST_X(o,drf,cmp,drfv) (NVVAL_GET_X((o), drf) cmp drfv)
+#define NVVAL_TEST_N(X,o,d,r,f,  cmp,v) NVVAL_TEST_X(o, d##_##r##_##f   , cmp, (v))
+#define NVVAL_TEST_I(X,o,d,r,f,i,cmp,v) NVVAL_TEST_X(o, d##_##r##_##f(i), cmp, (v))
+#define NVVAL_TEST_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define NVVAL_TEST(A...) NVVAL_TEST_(X, ##A, NVVAL_TEST_I, NVVAL_TEST_N)(X, ##A)
+
+#define NVDEF_TEST_N(X,o,d,r,f,  cmp,v) NVVAL_TEST_X(o, d##_##r##_##f   , cmp, d##_##r##_##f##_##v)
+#define NVDEF_TEST_I(X,o,d,r,f,i,cmp,v) NVVAL_TEST_X(o, d##_##r##_##f(i), cmp, d##_##r##_##f##_##v)
+#define NVDEF_TEST_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define NVDEF_TEST(A...) NVDEF_TEST_(X, ##A, NVDEF_TEST_I, NVDEF_TEST_N)(X, ##A)
+
+#define NVVAL_SET_X(o,drf,v) (((o) & ~DRF_SMASK(drf)) | NVVAL_X(drf, (v)))
+#define NVVAL_SET_N(X,o,d,r,f,  v) NVVAL_SET_X(o, d##_##r##_##f, (v))
+#define NVVAL_SET_I(X,o,d,r,f,i,v) NVVAL_SET_X(o, d##_##r##_##f(i), (v))
+#define NVVAL_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVVAL_SET(A...) NVVAL_SET_(X, ##A, NVVAL_SET_I, NVVAL_SET_N)(X, ##A)
+
+#define NVDEF_SET_N(X,o,d,r,f,  v) NVVAL_SET_X(o, d##_##r##_##f,    d##_##r##_##f##_##v)
+#define NVDEF_SET_I(X,o,d,r,f,i,v) NVVAL_SET_X(o, d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVDEF_SET(A...) NVDEF_SET_(X, ##A, NVDEF_SET_I, NVDEF_SET_N)(X, ##A)
+
+/* DRF-MW accessors. */
+#define NVVAL_MW_GET_X(o,drf)                                                       \
+       ((DRF_MW_SPANS((o),drf) ?                                                   \
+         (DRF_HW_GET((o),drf) << DRF_LW_BITS((o),drf)) : 0) | DRF_LW_GET((o),drf))
+#define NVVAL_MW_GET_N(X,o,d,r,f  ) NVVAL_MW_GET_X((o), d##_##r##_##f)
+#define NVVAL_MW_GET_I(X,o,d,r,f,i) NVVAL_MW_GET_X((o), d##_##r##_##f(i))
+#define NVVAL_MW_GET_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL_MW_GET(A...) NVVAL_MW_GET_(X, ##A, NVVAL_MW_GET_I, NVVAL_MW_GET_N)(X, ##A)
+
+#define NVVAL_MW_SET_X(o,drf,v) do {                                           \
+       (o)[DRF_LW_IDX((o),drf)] = DRF_LW_SET((o),drf,(v));                    \
+       if (DRF_MW_SPANS((o),drf))                                             \
+               (o)[DRF_HW_IDX((o),drf)] = DRF_HW_SET((o),drf,(v));            \
+} while(0)
+#define NVVAL_MW_SET_N(X,o,d,r,f,  v) NVVAL_MW_SET_X((o), d##_##r##_##f, (v))
+#define NVVAL_MW_SET_I(X,o,d,r,f,i,v) NVVAL_MW_SET_X((o), d##_##r##_##f(i), (v))
+#define NVVAL_MW_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVVAL_MW_SET(A...) NVVAL_MW_SET_(X, ##A, NVVAL_MW_SET_I, NVVAL_MW_SET_N)(X, ##A)
+
+#define NVDEF_MW_SET_N(X,o,d,r,f,  v) NVVAL_MW_SET_X(o, d##_##r##_##f,    d##_##r##_##f##_##v)
+#define NVDEF_MW_SET_I(X,o,d,r,f,i,v) NVVAL_MW_SET_X(o, d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_MW_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVDEF_MW_SET(A...) NVDEF_MW_SET_(X, ##A, NVDEF_MW_SET_I, NVDEF_MW_SET_N)(X, ##A)
+
+/* Helper for reading an arbitrary object. */
+#define DRF_RD_X(e,p,o,dr) e((p), (o), dr)
+#define DRF_RD_N(X,e,p,o,d,r  ) DRF_RD_X(e, (p), (o), d##_##r)
+#define DRF_RD_I(X,e,p,o,d,r,i) DRF_RD_X(e, (p), (o), d##_##r(i))
+#define DRF_RD_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define DRF_RD(A...) DRF_RD_(X, ##A, DRF_RD_I, DRF_RD_N)(X, ##A)
+
+/* Helper for writing an arbitrary object. */
+#define DRF_WR_X(e,p,o,dr,v) e((p), (o), dr, (v))
+#define DRF_WR_N(X,e,p,o,d,r,  v) DRF_WR_X(e, (p), (o), d##_##r   , (v))
+#define DRF_WR_I(X,e,p,o,d,r,i,v) DRF_WR_X(e, (p), (o), d##_##r(i), (v))
+#define DRF_WR_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define DRF_WR(A...) DRF_WR_(X, ##A, DRF_WR_I, DRF_WR_N)(X, ##A)
+
+/* Helper for modifying an arbitrary object. */
+#define DRF_MR_X(er,ew,ty,p,o,dr,m,v) ({               \
+       ty _t = DRF_RD_X(er, (p), (o), dr);            \
+       DRF_WR_X(ew, (p), (o), dr, (_t & ~(m)) | (v)); \
+       _t;                                            \
+})
+#define DRF_MR_N(X,er,ew,ty,p,o,d,r  ,m,v) DRF_MR_X(er, ew, ty, (p), (o), d##_##r   , (m), (v))
+#define DRF_MR_I(X,er,ew,ty,p,o,d,r,i,m,v) DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), (m), (v))
+#define DRF_MR_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MR(A...) DRF_MR_(X, ##A, DRF_MR_I, DRF_MR_N)(X, ##A)
+
+/* Helper for extracting a field value from an arbitrary object. */
+#define DRF_RV_X(e,p,o,dr,drf) NVVAL_GET_X(DRF_RD_X(e, (p), (o), dr), drf)
+#define DRF_RV_N(X,e,p,o,d,r,  f) DRF_RV_X(e, (p), (o), d##_##r   , d##_##r##_##f)
+#define DRF_RV_I(X,e,p,o,d,r,i,f) DRF_RV_X(e, (p), (o), d##_##r(i), d##_##r##_##f)
+#define DRF_RV_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define DRF_RV(A...) DRF_RV_(X, ##A, DRF_RV_I, DRF_RV_N)(X, ##A)
+
+/* Helper for writing a field value to an arbitrary object (all other bits cleared). */
+#define DRF_WV_N(X,e,p,o,d,r,  f,v)                                    \
+       DRF_WR_X(e, (p), (o), d##_##r   , NVVAL_X(d##_##r##_##f, (v)))
+#define DRF_WV_I(X,e,p,o,d,r,i,f,v)                                    \
+       DRF_WR_X(e, (p), (o), d##_##r(i), NVVAL_X(d##_##r##_##f, (v)))
+#define DRF_WV_(X,_1,_2,_3,_4,_5,_6,_7,_8,IMPL,...) IMPL
+#define DRF_WV(A...) DRF_WV_(X, ##A, DRF_WV_I, DRF_WV_N)(X, ##A)
+
+/* Helper for writing a field definition to an arbitrary object (all other bits cleared). */
+#define DRF_WD_N(X,e,p,o,d,r,  f,v)                                                    \
+       DRF_WR_X(e, (p), (o), d##_##r   , NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v))
+#define DRF_WD_I(X,e,p,o,d,r,i,f,v)                                                    \
+       DRF_WR_X(e, (p), (o), d##_##r(i), NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v))
+#define DRF_WD_(X,_1,_2,_3,_4,_5,_6,_7,_8,IMPL,...) IMPL
+#define DRF_WD(A...) DRF_WD_(X, ##A, DRF_WD_I, DRF_WD_N)(X, ##A)
+
+/* Helper for modifying a field value in an arbitrary object. */
+#define DRF_MV_N(X,er,ew,ty,p,o,d,r,  f,v)                                               \
+       NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r   , DRF_SMASK(d##_##r##_##f), \
+                   NVVAL_X(d##_##r##_##f, (v))), d##_##r##_##f)
+#define DRF_MV_I(X,er,ew,ty,p,o,d,r,i,f,v)                                               \
+       NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), DRF_SMASK(d##_##r##_##f), \
+                   NVVAL_X(d##_##r##_##f, (v))), d##_##r##_##f)
+#define DRF_MV_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MV(A...) DRF_MV_(X, ##A, DRF_MV_I, DRF_MV_N)(X, ##A)
+
+/* Helper for modifying a field definition in an arbitrary object. */
+#define DRF_MD_N(X,er,ew,ty,p,o,d,r,  f,v)                                               \
+       NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r   , DRF_SMASK(d##_##r##_##f), \
+                   NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)), d##_##r##_##f)
+#define DRF_MD_I(X,er,ew,ty,p,o,d,r,i,f,v)                                               \
+       NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), DRF_SMASK(d##_##r##_##f), \
+                   NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)), d##_##r##_##f)
+#define DRF_MD_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MD(A...) DRF_MD_(X, ##A, DRF_MD_I, DRF_MD_N)(X, ##A)
+
+/* Helper for testing against a field value in an arbitrary object. */
+#define DRF_TV_N(X,e,p,o,d,r,  f,cmp,v)                                          \
+       NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r   ), d##_##r##_##f, cmp, (v))
+#define DRF_TV_I(X,e,p,o,d,r,i,f,cmp,v)                                          \
+       NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r(i)), d##_##r##_##f, cmp, (v))
+#define DRF_TV_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,IMPL,...) IMPL
+#define DRF_TV(A...) DRF_TV_(X, ##A, DRF_TV_I, DRF_TV_N)(X, ##A)
+
+/* Helper for testing against a field definition in an arbitrary object. */
+#define DRF_TD_N(X,e,p,o,d,r,  f,cmp,v)                                                          \
+       NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r   ), d##_##r##_##f, cmp, d##_##r##_##f##_##v)
+#define DRF_TD_I(X,e,p,o,d,r,i,f,cmp,v)                                                          \
+       NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r(i)), d##_##r##_##f, cmp, d##_##r##_##f##_##v)
+#define DRF_TD_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,IMPL,...) IMPL
+#define DRF_TD(A...) DRF_TD_(X, ##A, DRF_TD_I, DRF_TD_N)(X, ##A)
+#endif
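DRF_LO and DRF_HI work because each field definition is a high:low pair, so (1 ? 15:0) evaluates to 15 and (0 ? 15:0) to 0; NVVAL, NVDEF and NVVAL_GET build on that to pack literal values, pack named values and extract fields by name. A minimal usage sketch against the NVC57E definitions above (the numbers are illustrative):

  u32 size    = NVVAL(NVC57E, SET_SIZE, WIDTH, 1920) |
                NVVAL(NVC57E, SET_SIZE, HEIGHT, 1080);
  u32 storage = NVDEF(NVC57E, SET_STORAGE, MEMORY_LAYOUT, PITCH); /* named value, 1 << 4 */
  u32 width   = NVVAL_GET(size, NVC57E, SET_SIZE, WIDTH);         /* extracts 1920 */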
index e63c6c9..347d2c0 100644 (file)
@@ -12,9 +12,9 @@ struct nvif_client {
        bool super;
 };
 
-int  nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+int  nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
                      struct nvif_client *);
-void nvif_client_fini(struct nvif_client *);
+void nvif_client_dtor(struct nvif_client *);
 int  nvif_client_ioctl(struct nvif_client *, void *, u32);
 int  nvif_client_suspend(struct nvif_client *);
 int  nvif_client_resume(struct nvif_client *);
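The remaining nvif header hunks are the mechanical half of this change: each *_init/*_fini pair is renamed to *_ctor/*_dtor, and most constructors gain a const char *name argument that feeds the debug prefix used by printf.h further down. A hypothetical caller (names and error handling illustrative):

  struct nvif_client client;
  int ret = nvif_client_ctor(&parent, "exampleClient", device, &client);
  ...
  nvif_client_dtor(&client);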
index c2a572c..b0e5980 100644 (file)
@@ -18,9 +18,9 @@ struct nvif_device {
        struct nvif_user user;
 };
 
-int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
-                     struct nvif_device *);
-void nvif_device_fini(struct nvif_device *);
+int  nvif_device_ctor(struct nvif_object *, const char *name, u32 handle,
+                     s32 oclass, void *, u32, struct nvif_device *);
+void nvif_device_dtor(struct nvif_device *);
 u64  nvif_device_time(struct nvif_device *);
 
 /*XXX*/
index 7c0eda3..07ac544 100644 (file)
@@ -7,6 +7,7 @@ struct nvif_disp {
        struct nvif_object object;
 };
 
-int nvif_disp_ctor(struct nvif_device *, s32 oclass, struct nvif_disp *);
+int nvif_disp_ctor(struct nvif_device *, const char *name, s32 oclass,
+                  struct nvif_disp *);
 void nvif_disp_dtor(struct nvif_disp *);
 #endif
index 80ee4ab..9e1071d 100644 (file)
@@ -10,11 +10,13 @@ struct nvif_mem {
        u64 size;
 };
 
-int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
-                      u64 size, void *argv, u32 argc, struct nvif_mem *);
-int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
-                 u64 size, void *argv, u32 argc, struct nvif_mem *);
-void nvif_mem_fini(struct nvif_mem *);
+int nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
+                      int type, u8 page, u64 size, void *argv, u32 argc,
+                      struct nvif_mem *);
+int nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
+                 u8 page, u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_dtor(struct nvif_mem *);
 
-int nvif_mem_init_map(struct nvif_mmu *, u8 type, u64 size, struct nvif_mem *);
+int nvif_mem_ctor_map(struct nvif_mmu *, const char *name, u8 type, u64 size,
+                     struct nvif_mem *);
 #endif
index cec1e88..2035ef1 100644 (file)
@@ -31,8 +31,9 @@ struct nvif_mmu {
        u8 *kind;
 };
 
-int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
-void nvif_mmu_fini(struct nvif_mmu *);
+int nvif_mmu_ctor(struct nvif_object *, const char *name, s32 oclass,
+                 struct nvif_mmu *);
+void nvif_mmu_dtor(struct nvif_mmu *);
 
 static inline bool
 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
index 6863732..39f6b7e 100644 (file)
@@ -4,6 +4,7 @@
 
 struct nvif_notify {
        struct nvif_object *object;
+       const char *name;
        int index;
 
 #define NVIF_NOTIFY_USER 0
@@ -24,10 +25,10 @@ struct nvif_notify {
        struct work_struct work;
 };
 
-int  nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
-                     bool work, u8 type, void *data, u32 size, u32 reply,
-                     struct nvif_notify *);
-int  nvif_notify_fini(struct nvif_notify *);
+int  nvif_notify_ctor(struct nvif_object *, const char *name,
+                     int (*func)(struct nvif_notify *), bool work, u8 type,
+                     void *data, u32 size, u32 reply, struct nvif_notify *);
+int  nvif_notify_dtor(struct nvif_notify *);
 int  nvif_notify_get(struct nvif_notify *);
 int  nvif_notify_put(struct nvif_notify *);
 int  nvif_notify(const void *, u32, const void *, u32);
index 604fabc..1e4c158 100644 (file)
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVIF_OBJECT_H__
 #define __NVIF_OBJECT_H__
-
 #include <nvif/os.h>
 
 struct nvif_sclass {
@@ -11,7 +10,9 @@ struct nvif_sclass {
 };
 
 struct nvif_object {
+       struct nvif_parent *parent;
        struct nvif_client *client;
+       const char *name;
        u32 handle;
        s32 oclass;
        void *priv; /*XXX: hack */
@@ -21,9 +22,9 @@ struct nvif_object {
        } map;
 };
 
-int  nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
-                     struct nvif_object *);
-void nvif_object_fini(struct nvif_object *);
+int  nvif_object_ctor(struct nvif_object *, const char *name, u32 handle,
+                     s32 oclass, void *, u32, struct nvif_object *);
+void nvif_object_dtor(struct nvif_object *);
 int  nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
 int  nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
 void nvif_object_sclass_put(struct nvif_sclass **);
@@ -115,6 +116,19 @@ struct nvif_mclass {
        _cid;                                                                  \
 })
 
+#define NVIF_RD32_(p,o,dr)   nvif_rd32((p), (o) + (dr))
+#define NVIF_WR32_(p,o,dr,f) nvif_wr32((p), (o) + (dr), (f))
+#define NVIF_RD32(p,A...) DRF_RD(NVIF_RD32_,                  (p), 0, ##A)
+#define NVIF_RV32(p,A...) DRF_RV(NVIF_RD32_,                  (p), 0, ##A)
+#define NVIF_TV32(p,A...) DRF_TV(NVIF_RD32_,                  (p), 0, ##A)
+#define NVIF_TD32(p,A...) DRF_TD(NVIF_RD32_,                  (p), 0, ##A)
+#define NVIF_WR32(p,A...) DRF_WR(            NVIF_WR32_,      (p), 0, ##A)
+#define NVIF_WV32(p,A...) DRF_WV(            NVIF_WR32_,      (p), 0, ##A)
+#define NVIF_WD32(p,A...) DRF_WD(            NVIF_WR32_,      (p), 0, ##A)
+#define NVIF_MR32(p,A...) DRF_MR(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+#define NVIF_MV32(p,A...) DRF_MV(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+#define NVIF_MD32(p,A...) DRF_MD(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+
 /*XXX*/
 #include <core/object.h>
 #define nvxx_object(a) ({                                                      \
diff --git a/drivers/gpu/drm/nouveau/include/nvif/parent.h b/drivers/gpu/drm/nouveau/include/nvif/parent.h
new file mode 100644 (file)
index 0000000..41cb1b0
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVIF_PARENT_H__
+#define __NVIF_PARENT_H__
+#include <nvif/os.h>
+struct nvif_object;
+
+struct nvif_parent {
+       const struct nvif_parent_func {
+               void (*debugf)(struct nvif_object *, const char *fmt, ...) __printf(2, 3);
+               void (*errorf)(struct nvif_object *, const char *fmt, ...) __printf(2, 3);
+       } *func;
+};
+
+static inline void
+nvif_parent_dtor(struct nvif_parent *parent)
+{
+       parent->func = NULL;
+}
+
+static inline void
+nvif_parent_ctor(const struct nvif_parent_func *func, struct nvif_parent *parent)
+{
+       parent->func = func;
+}
+#endif
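nvif_parent is a small vtable hook: the driver that owns the root of an nvif object tree supplies debugf/errorf callbacks, and the printf.h macros below route object-level messages through them. A sketch of a provider, with the names and bodies purely illustrative:

  static void
  example_debugf(struct nvif_object *object, const char *fmt, ...)
  {
          /* e.g. hand the format and va_list off to the driver's debug logging */
  }

  static void
  example_errorf(struct nvif_object *object, const char *fmt, ...)
  {
          /* e.g. hand the format and va_list off to the driver's error logging */
  }

  static const struct nvif_parent_func example_parent_func = {
          .debugf = example_debugf,
          .errorf = example_errorf,
  };

  /* nvif_parent_ctor(&example_parent_func, &parent); */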
diff --git a/drivers/gpu/drm/nouveau/include/nvif/printf.h b/drivers/gpu/drm/nouveau/include/nvif/printf.h
new file mode 100644 (file)
index 0000000..6c299ec
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVIF_PRINTF_H__
+#define __NVIF_PRINTF_H__
+#include <nvif/client.h>
+#include <nvif/parent.h>
+
+#define NVIF_PRINT(l,o,f,a...) do {                                                                \
+       struct nvif_object *_o = (o);                                                              \
+       struct nvif_parent *_p = _o->parent;                                                       \
+       _p->func->l(_o, "[%s/%08x:%s] "f"\n", _o->client->object.name, _o->handle, _o->name, ##a); \
+} while(0)
+
+#ifndef NVIF_DEBUG_PRINT_DISABLE
+#define NVIF_DEBUG(o,f,a...) NVIF_PRINT(debugf, (o), f, ##a)
+#else
+#define NVIF_DEBUG(o,f,a...)
+#endif
+
+#define NVIF_ERROR(o,f,a...) NVIF_PRINT(errorf, (o), f, ##a)
+#endif
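With a parent func table wired up as sketched above, these macros prefix each message with the client name, object handle and object name, and NVIF_DEBUG compiles away entirely when NVIF_DEBUG_PRINT_DISABLE is defined. Usage is plain printf-style (object and values illustrative):

  NVIF_DEBUG(&object, "mapped %d bytes", size);
  NVIF_ERROR(&object, "ioctl failed: %d", ret);
  /* both print through parent->func with a "[client/handle:name] " prefix */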
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
new file mode 100644 (file)
index 0000000..168d769
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVIF_PUSH_H__
+#define __NVIF_PUSH_H__
+#include <nvif/mem.h>
+#include <nvif/printf.h>
+
+#include <nvhw/drf.h>
+
+struct nvif_push {
+       int (*wait)(struct nvif_push *push, u32 size);
+       void (*kick)(struct nvif_push *push);
+
+       struct nvif_mem mem;
+
+       u32 *bgn;
+       u32 *cur;
+       u32 *seg;
+       u32 *end;
+};
+
+static inline __must_check int
+PUSH_WAIT(struct nvif_push *push, u32 size)
+{
+       if (push->cur + size >= push->end) {
+               int ret = push->wait(push, size);
+               if (ret)
+                       return ret;
+       }
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+       push->seg = push->cur + size;
+#endif
+       return 0;
+}
+
+static inline int
+PUSH_KICK(struct nvif_push *push)
+{
+       push->kick(push);
+       return 0;
+}
+
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+#define PUSH_PRINTF(p,f,a...) do {                              \
+       struct nvif_push *_ppp = (p);                           \
+       u32 __o = _ppp->cur - (u32 *)_ppp->mem.object.map.ptr;  \
+       NVIF_DEBUG(&_ppp->mem.object, "%08x: "f, __o * 4, ##a); \
+       (void)__o;                                              \
+} while(0)
+#define PUSH_ASSERT_ON(a,b) WARN((a), b)
+#else
+#define PUSH_PRINTF(p,f,a...)
+#define PUSH_ASSERT_ON(a, b)
+#endif
+
+#define PUSH_ASSERT(a,b) do {                                             \
+       static_assert(                                                    \
+               __builtin_choose_expr(__builtin_constant_p(a), (a), 1), b \
+       );                                                                \
+       PUSH_ASSERT_ON(!(a), b);                                          \
+} while(0)
+
+#define PUSH_DATA__(p,d,f,a...) do {                       \
+       struct nvif_push *_p = (p);                        \
+       u32 _d = (d);                                      \
+       PUSH_ASSERT(_p->cur < _p->seg, "segment overrun"); \
+       PUSH_ASSERT(_p->cur < _p->end, "pushbuf overrun"); \
+       PUSH_PRINTF(_p, "%08x"f, _d, ##a);                 \
+       *_p->cur++ = _d;                                   \
+} while(0)
+
+#define PUSH_DATA_(X,p,m,i0,i1,d,s,f,a...) PUSH_DATA__((p), (d), "-> "#m f, ##a)
+#define PUSH_DATA(p,d) PUSH_DATA__((p), (d), " data - %s", __func__)
+
+//XXX: error-check this against *real* pushbuffer end?
+#define PUSH_RSVD(p,d) do {          \
+       struct nvif_push *__p = (p); \
+       __p->seg++;                  \
+       __p->end++;                  \
+       d;                           \
+} while(0)
+
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+#define PUSH_DATAp(X,p,m,i,o,d,s,f,a...) do {                                     \
+       struct nvif_push *_pp = (p);                                              \
+       const u32 *_dd = (d);                                                     \
+       u32 _s = (s), _i = (i?PUSH_##o##_INC);                                    \
+       if (_s--) {                                                               \
+               PUSH_DATA_(X, _pp, X##m, i0, i1, *_dd++, 1, "+0x%x", 0);          \
+               while (_s--) {                                                    \
+                       PUSH_DATA_(X, _pp, X##m, i0, i1, *_dd++, 1, "+0x%x", _i); \
+                       _i += (0?PUSH_##o##_INC);                                 \
+               }                                                                 \
+       }                                                                         \
+} while(0)
+#else
+#define PUSH_DATAp(X,p,m,i,o,d,s,f,a...) do {                    \
+       struct nvif_push *_p = (p);                              \
+       u32 _s = (s);                                            \
+       PUSH_ASSERT(_p->cur + _s <= _p->seg, "segment overrun"); \
+       PUSH_ASSERT(_p->cur + _s <= _p->end, "pushbuf overrun"); \
+       memcpy(_p->cur, (d), _s << 2);                           \
+       _p->cur += _s;                                           \
+} while(0)
+#endif
+
+#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do {                            \
+       PUSH_##o##_HDR((p), s, mA, (c)+(n));                           \
+       PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, "");                   \
+} while(0)
+#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1");       \
+       PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2");       \
+       PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3");       \
+       PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4");       \
+       PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5");       \
+       PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6");       \
+       PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7");       \
+       PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8");       \
+       PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9");       \
+       PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+} while(0)
+
+#define PUSH_1D(X,o,p,s,mA,dA)                            \
+       PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB)                      \
+       PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
+                                            X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC)                \
+       PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
+                                            X##mB, (dB), \
+                                            X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD)          \
+       PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
+                                            X##mC, (dC), \
+                                            X##mB, (dB), \
+                                            X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE)    \
+       PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
+                                            X##mD, (dD), \
+                                            X##mC, (dC), \
+                                            X##mB, (dB), \
+                                            X##mA, (dA))
+#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
+       PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF),    \
+                                            X##mE, (dE),    \
+                                            X##mD, (dD),    \
+                                            X##mC, (dC),    \
+                                            X##mB, (dB),    \
+                                            X##mA, (dA))
+#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
+       PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG),          \
+                                            X##mF, (dF),          \
+                                            X##mE, (dE),          \
+                                            X##mD, (dD),          \
+                                            X##mC, (dC),          \
+                                            X##mB, (dB),          \
+                                            X##mA, (dA))
+#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
+       PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH),                \
+                                            X##mG, (dG),                \
+                                            X##mF, (dF),                \
+                                            X##mE, (dE),                \
+                                            X##mD, (dD),                \
+                                            X##mC, (dC),                \
+                                            X##mB, (dB),                \
+                                            X##mA, (dA))
+#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
+       PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI),                      \
+                                            X##mH, (dH),                      \
+                                            X##mG, (dG),                      \
+                                            X##mF, (dF),                      \
+                                            X##mE, (dE),                      \
+                                            X##mD, (dD),                      \
+                                            X##mC, (dC),                      \
+                                            X##mB, (dB),                      \
+                                            X##mA, (dA))
+#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
+       PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ),                            \
+                                             X##mI, (dI),                            \
+                                             X##mH, (dH),                            \
+                                             X##mG, (dG),                            \
+                                             X##mF, (dF),                            \
+                                             X##mE, (dE),                            \
+                                             X##mD, (dD),                            \
+                                             X##mC, (dC),                            \
+                                             X##mB, (dB),                            \
+                                             X##mA, (dA))
+
+#define PUSH_1P(X,o,p,s,mA,dp,ds)                           \
+       PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds)                     \
+       PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
+                                              X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds)               \
+       PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
+                                              X##mB, (dB), \
+                                              X##mA, (dA))
+
+#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
+#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D,          \
+                           PUSH_9P , PUSH_9D,           \
+                           PUSH_8P , PUSH_8D,           \
+                           PUSH_7P , PUSH_7D,           \
+                           PUSH_6P , PUSH_6D,           \
+                           PUSH_5P , PUSH_5D,           \
+                           PUSH_4P , PUSH_4D,           \
+                           PUSH_3P , PUSH_3D,           \
+                           PUSH_2P , PUSH_2D,           \
+                           PUSH_1P , PUSH_1D)(, ##A)
+
+#define PUSH_NVIM(p,c,m,d) do {             \
+       struct nvif_push *__p = (p);        \
+       u32 __d = (d);                      \
+       PUSH_IMMD_HDR(__p, c, m, __d);      \
+       __p->cur--;                         \
+       PUSH_PRINTF(__p, "%08x-> "#m, __d); \
+       __p->cur++;                         \
+} while(0)
+#define PUSH_NVSQ(A...) PUSH(MTHD, ##A)
+#define PUSH_NV1I(A...) PUSH(1INC, ##A)
+#define PUSH_NVNI(A...) PUSH(NINC, ##A)
+
+
+#define PUSH_NV_1(X,o,p,c,mA,d...) \
+       PUSH_##o(p,c,c##_##mA,d)
+#define PUSH_NV_2(X,o,p,c,mA,dA,mB,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,         \
+                   c##_##mB,d)
+#define PUSH_NV_3(X,o,p,c,mA,dA,mB,dB,mC,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,               \
+                   c##_##mB,dB,               \
+                   c##_##mC,d)
+#define PUSH_NV_4(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                     \
+                   c##_##mB,dB,                     \
+                   c##_##mC,dC,                     \
+                   c##_##mD,d)
+#define PUSH_NV_5(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                           \
+                   c##_##mB,dB,                           \
+                   c##_##mC,dC,                           \
+                   c##_##mD,dD,                           \
+                   c##_##mE,d)
+#define PUSH_NV_6(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                                 \
+                   c##_##mB,dB,                                 \
+                   c##_##mC,dC,                                 \
+                   c##_##mD,dD,                                 \
+                   c##_##mE,dE,                                 \
+                   c##_##mF,d)
+#define PUSH_NV_7(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                                       \
+                   c##_##mB,dB,                                       \
+                   c##_##mC,dC,                                       \
+                   c##_##mD,dD,                                       \
+                   c##_##mE,dE,                                       \
+                   c##_##mF,dF,                                       \
+                   c##_##mG,d)
+#define PUSH_NV_8(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                                             \
+                   c##_##mB,dB,                                             \
+                   c##_##mC,dC,                                             \
+                   c##_##mD,dD,                                             \
+                   c##_##mE,dE,                                             \
+                   c##_##mF,dF,                                             \
+                   c##_##mG,dG,                                             \
+                   c##_##mH,d)
+#define PUSH_NV_9(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                                                   \
+                   c##_##mB,dB,                                                   \
+                   c##_##mC,dC,                                                   \
+                   c##_##mD,dD,                                                   \
+                   c##_##mE,dE,                                                   \
+                   c##_##mF,dF,                                                   \
+                   c##_##mG,dG,                                                   \
+                   c##_##mH,dH,                                                   \
+                   c##_##mI,d)
+#define PUSH_NV_10(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,d...) \
+       PUSH_##o(p,c,c##_##mA,dA,                                                          \
+                   c##_##mB,dB,                                                          \
+                   c##_##mC,dC,                                                          \
+                   c##_##mD,dD,                                                          \
+                   c##_##mE,dE,                                                          \
+                   c##_##mF,dF,                                                          \
+                   c##_##mG,dG,                                                          \
+                   c##_##mH,dH,                                                          \
+                   c##_##mI,dI,                                                          \
+                   c##_##mJ,d)
+
+#define PUSH_NV_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
+#define PUSH_NV(A...) PUSH_NV_(A, PUSH_NV_10, PUSH_NV_10,       \
+                                 PUSH_NV_9 , PUSH_NV_9,        \
+                                 PUSH_NV_8 , PUSH_NV_8,        \
+                                 PUSH_NV_7 , PUSH_NV_7,        \
+                                 PUSH_NV_6 , PUSH_NV_6,        \
+                                 PUSH_NV_5 , PUSH_NV_5,        \
+                                 PUSH_NV_4 , PUSH_NV_4,        \
+                                 PUSH_NV_3 , PUSH_NV_3,        \
+                                 PUSH_NV_2 , PUSH_NV_2,        \
+                                 PUSH_NV_1 , PUSH_NV_1)(, ##A)
+
+#define PUSH_IMMD(A...) PUSH_NV(NVIM, ##A)
+#define PUSH_MTHD(A...) PUSH_NV(NVSQ, ##A)
+#define PUSH_1INC(A...) PUSH_NV(NV1I, ##A)
+#define PUSH_NINC(A...) PUSH_NV(NVNI, ##A)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push006c.h b/drivers/gpu/drm/nouveau/include/nvif/push006c.h
new file mode 100644
index 0000000..a31c147
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef __NVIF_PUSH006C_H__
+#define __NVIF_PUSH006C_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl006c.h>
+
+#ifndef PUSH006C_SUBC
+// Host methods
+#define PUSH006C_SUBC_NV06E    0
+#define PUSH006C_SUBC_NV176E   0
+#define PUSH006C_SUBC_NV826F   0
+
+// ContextSurfaces2d
+#define PUSH006C_SUBC_NV042    0
+#define PUSH006C_SUBC_NV062    0
+
+// ContextClipRectangle
+#define PUSH006C_SUBC_NV019    0
+
+// ContextRop
+#define PUSH006C_SUBC_NV043    0
+
+// ContextPattern
+#define PUSH006C_SUBC_NV044    0
+
+// Misc dodginess...
+#define PUSH006C_SUBC_NV_SW    1
+
+// ImageBlit
+#define PUSH006C_SUBC_NV05F    2
+#define PUSH006C_SUBC_NV09F    2
+
+// GdiRectangleText
+#define PUSH006C_SUBC_NV04A    3
+
+// Twod
+#define PUSH006C_SUBC_NV502D   3
+
+// MemoryToMemoryFormat
+#define PUSH006C_SUBC_NV039    4
+#define PUSH006C_SUBC_NV5039   4
+
+// DmaCopy
+#define PUSH006C_SUBC_NV85B5   4
+
+// Cipher
+#define PUSH006C_SUBC_NV74C1   4
+#endif
+
+#define PUSH_HDR(p,o,n,s,m,c) do {                                        \
+        PUSH_ASSERT(!((s) & ~DRF_MASK(NV06C_METHOD_SUBCHANNEL)), "subc"); \
+        PUSH_ASSERT(!((m) & ~DRF_SMASK(NV06C_METHOD_ADDRESS)), "mthd");   \
+        PUSH_ASSERT(!((c) & ~DRF_MASK(NV06C_METHOD_COUNT)), "count");     \
+        PUSH_DATA__((p), NVVAL_X(NV06C_METHOD_ADDRESS, (m) >> 2) |        \
+                        NVVAL_X(NV06C_METHOD_SUBCHANNEL, (s)) |          \
+                        NVVAL_X(NV06C_METHOD_COUNT, (c)) |               \
+                        NVVAL_X(NV06C_OPCODE, NV06C_OPCODE_##o),         \
+                   " "n" subc %d mthd 0x%04x size %d - %s",              \
+                   (u32)(s), (u32)(m), (u32)(c), __func__);              \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,c,m,n) PUSH_HDR(p, METHOD, "incr", PUSH006C_SUBC_##c, m, n)
+#define PUSH_MTHD_INC 4:4
+#define PUSH_NINC_HDR(p,c,m,n) PUSH_HDR(p, NONINC_METHOD, "ninc", PUSH006C_SUBC_##c, m, n)
+#define PUSH_NINC_INC 0:0
+
+#define PUSH_JUMP(p,o) do {                                         \
+        PUSH_ASSERT(!((o) & ~0x1fffffffcULL), "offset");            \
+       PUSH_DATA__((p), NVVAL_X(NV06C_OPCODE, NV06C_OPCODE_JUMP) | \
+                        NVVAL_X(NV06C_JUMP_OFFSET, (o) >> 2),      \
+                   " jump 0x%08x - %s", (u32)(o), __func__);       \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push206e.h b/drivers/gpu/drm/nouveau/include/nvif/push206e.h
new file mode 100644
index 0000000..1dfb8a3
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __NVIF_PUSH206E_H__
+#define __NVIF_PUSH206E_H__
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl206e.h>
+
+#define PUSH_CALL(p,o) do {                                         \
+        PUSH_ASSERT(!((o) & ~0xffffffffcULL), "offset");            \
+       PUSH_DATA__((p), NVDEF(NV206E, DMA, OPCODE2, CALL) |        \
+                        NVVAL(NV206E, DMA, CALL_OFFSET, (o) >> 2), \
+                   " call 0x%08x - %s", (u32)(o), __func__);       \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push507c.h b/drivers/gpu/drm/nouveau/include/nvif/push507c.h
new file mode 100644
index 0000000..889467f
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __NVIF_PUSH507C_H__
+#define __NVIF_PUSH507C_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl507c.h>
+
+#define PUSH_HDR(p,m,c) do {                                                    \
+        PUSH_ASSERT(!((m) & ~DRF_SMASK(NV507C_DMA_METHOD_OFFSET)), "mthd");     \
+        PUSH_ASSERT(!((c) & ~DRF_MASK(NV507C_DMA_METHOD_COUNT)), "size");       \
+        PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, METHOD) |                   \
+                        NVVAL(NV507C, DMA, METHOD_COUNT, (c)) |                \
+                        NVVAL(NV507C, DMA, METHOD_OFFSET, (m) >> 2),           \
+                   " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+
+#define PUSH_JUMP(p,o) do {                                                 \
+        PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \
+       PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) |                 \
+                        NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2),         \
+                   "jump 0x%08x - %s", (u32)(o), __func__);                \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
new file mode 100644
index 0000000..cc2866b
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __NVIF_PUSH906F_H__
+#define __NVIF_PUSH906F_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl906f.h>
+
+#ifndef PUSH906F_SUBC
+// Host methods
+#define PUSH906F_SUBC_NV906F   0
+
+// Twod
+#define PUSH906F_SUBC_NV902D   3
+
+// MemoryToMemoryFormat
+#define PUSH906F_SUBC_NV9039   4
+
+// DmaCopy
+#define PUSH906F_SUBC_NV90B5   4
+#define PUSH906F_SUBC_NVA0B5   4
+#endif
+
+#define PUSH_HDR(p,o,n,f,s,m,c) do {                                                \
+        PUSH_ASSERT(!((s) & ~DRF_MASK(NV906F_DMA_METHOD_SUBCHANNEL)), "subc");      \
+        PUSH_ASSERT(!((m) & ~(DRF_MASK(NV906F_DMA_METHOD_ADDRESS) << 2)), "mthd");   \
+        PUSH_ASSERT(!((c) & ~DRF_MASK(NV906F_DMA_METHOD_COUNT)), "count/immd");     \
+        PUSH_DATA__((p), NVVAL(NV906F, DMA, METHOD_ADDRESS, (m) >> 2) |             \
+                        NVVAL(NV906F, DMA, METHOD_SUBCHANNEL, (s)) |               \
+                        NVVAL(NV906F, DMA, METHOD_COUNT, (c)) |                    \
+                        NVDEF(NV906F, DMA, SEC_OP, o),                             \
+                   " "n" subc %d mthd 0x%04x "f" - %s",                            \
+                   (u32)(s), (u32)(m), (u32)(c), __func__);                        \
+} while(0)
+
+#define PUSH_MTHD_INC 4:4
+#define PUSH_MTHD_HDR(p,c,m,n) \
+       PUSH_HDR(p, INC_METHOD, "incr", "size %d", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_NINC_INC 0:0
+#define PUSH_NINC_HDR(p,c,m,n) \
+       PUSH_HDR(p, NON_INC_METHOD, "ninc", "size %d", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_IMMD_HDR(p,c,m,n) \
+       PUSH_HDR(p, IMMD_DATA_METHOD, "immd", "data 0x%04x", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_1INC_INC 4:0
+#define PUSH_1INC_HDR(p,c,m,n) \
+       PUSH_HDR(p, ONE_INC, "oinc", "size %d", PUSH906F_SUBC_##c, m, n)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc37b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc37b.h
new file mode 100644
index 0000000..8f0c457
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC37B_H__
+#define __NVIF_PUSHC37B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc37b.h>
+
+#define PUSH_HDR(p,m,c) do {                                                    \
+        PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC37B_DMA_METHOD_OFFSET)), "mthd");     \
+        PUSH_ASSERT(!((c) & ~DRF_MASK(NVC37B_DMA_METHOD_COUNT)), "size");       \
+        PUSH_DATA__((p), NVDEF(NVC37B, DMA, OPCODE, METHOD) |                   \
+                        NVVAL(NVC37B, DMA, METHOD_COUNT, (c)) |                \
+                        NVVAL(NVC37B, DMA, METHOD_OFFSET, (m) >> 2),           \
+                   " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
index 6825574..146986a 100644
@@ -13,8 +13,8 @@ struct nvif_user_func {
        u64 (*time)(struct nvif_user *);
 };
 
-int nvif_user_init(struct nvif_device *);
-void nvif_user_fini(struct nvif_device *);
+int nvif_user_ctor(struct nvif_device *, const char *name);
+void nvif_user_dtor(struct nvif_device *);
 
 extern const struct nvif_user_func nvif_userc361;
 #endif
index 79bf85d..a2ee922 100644
@@ -30,9 +30,9 @@ struct nvif_vmm {
        int page_nr;
 };
 
-int nvif_vmm_init(struct nvif_mmu *, s32 oclass, bool managed, u64 addr,
-                 u64 size, void *argv, u32 argc, struct nvif_vmm *);
-void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
+                 u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_dtor(struct nvif_vmm *);
 int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
                 u8 page, u8 align, u64 size, struct nvif_vma *);
 void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
index daa8e4b..3981cb1 100644
@@ -31,21 +31,21 @@ int gp102_sec2_flcn_enable(struct nvkm_falcon *);
 #define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
 
 /**
- * struct nv_falcon_msg - header for all messages
+ * struct nvfw_falcon_msg - header for all messages
  *
  * @unit_id:   id of firmware process that sent the message
  * @size:      total size of message
  * @ctrl_flags:        control flags
  * @seq_id:    used to match a message from its corresponding command
  */
-struct nv_falcon_msg {
+struct nvfw_falcon_msg {
        u8 unit_id;
        u8 size;
        u8 ctrl_flags;
        u8 seq_id;
 };
 
-#define nv_falcon_cmd nv_falcon_msg
+#define nvfw_falcon_cmd nvfw_falcon_msg
 #define NV_FALCON_CMD_UNIT_ID_REWIND                                       0x00
 
 struct nvkm_falcon_qmgr;
@@ -53,7 +53,7 @@ int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
 void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
 
 typedef int
-(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nvfw_falcon_msg *);
 
 struct nvkm_falcon_cmdq;
 int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
@@ -62,7 +62,7 @@ void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
 void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
                           u32 index, u32 offset, u32 size);
 void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
-int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nvfw_falcon_cmd *,
                          nvkm_falcon_qmgr_callback, void *priv,
                          unsigned long timeout_jiffies);
 
index d14b7fb..85bcb80 100644
@@ -45,11 +45,8 @@ int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
                }                                                              \
        }                                                                      \
                                                                                \
-       if (_ret) {                                                            \
-               nvkm_error(_s, "failed to load firmware\n");                   \
+       if (_ret)                                                              \
                _fwif = ERR_PTR(_ret);                                         \
-       }                                                                      \
-                                                                              \
        _fwif;                                                                 \
 })
 #endif
index 5d9c3a9..836d8b9 100644
@@ -39,6 +39,8 @@ struct nvkm_acr {
        struct list_head hsfw, hsf;
        struct list_head lsfw, lsf;
 
+       u64 managed_falcons;
+
        struct nvkm_memory *wpr;
        u64 wpr_start;
        u64 wpr_end;
@@ -107,6 +109,7 @@ struct nvkm_acr_lsf_func {
        void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
        void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
        int (*boot)(struct nvkm_falcon *);
+       u64 bootstrap_falcons;
        int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
        int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
 };
index da55308..5ff6d1f 100644
@@ -47,8 +47,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
index a8c21c6..d06dcbe 100644
@@ -63,8 +63,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
                dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n");          \
        _taken;                                                                \
 })
-#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
-#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
+#define nvkm_usec(d, u, cond...) nvkm_nsec((d), (u) * 1000ULL, ##cond)
+#define nvkm_msec(d, m, cond...) nvkm_usec((d), (m) * 1000ULL, ##cond)
 
 #define nvkm_wait_nsec(d,n,addr,mask,data)                                     \
        nvkm_nsec(d, n,                                                        \
index 5b24069..21537ca 100644
@@ -55,8 +55,8 @@ nouveau_abi16(struct drm_file *file_priv)
                         * device (ie. the one that belongs to the fd it
                         * opened)
                         */
-                       if (nvif_device_init(&cli->base.object, 0, NV_DEVICE,
-                                            &args, sizeof(args),
+                       if (nvif_device_ctor(&cli->base.object, "abi16Device",
+                                            0, NV_DEVICE, &args, sizeof(args),
                                             &abi16->device) == 0)
                                return cli->abi16;
 
@@ -114,7 +114,7 @@ static void
 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
                        struct nouveau_abi16_ntfy *ntfy)
 {
-       nvif_object_fini(&ntfy->object);
+       nvif_object_dtor(&ntfy->object);
        nvkm_mm_free(&chan->heap, &ntfy->node);
        list_del(&ntfy->head);
        kfree(ntfy);
@@ -167,7 +167,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
        }
 
        /* destroy the device object */
-       nvif_device_fini(&abi16->device);
+       nvif_device_dtor(&abi16->device);
 
        kfree(cli->abi16);
        cli->abi16 = NULL;
@@ -502,8 +502,8 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
        list_add(&ntfy->head, &chan->notifiers);
 
        client->route = NVDRM_OBJECT_ABI16;
-       ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
-                              NULL, 0, &ntfy->object);
+       ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
+                              oclass, NULL, 0, &ntfy->object);
        client->route = NVDRM_OBJECT_NVIF;
 
        if (ret)
@@ -569,7 +569,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 
        client->route = NVDRM_OBJECT_ABI16;
        client->super = true;
-       ret = nvif_object_init(&chan->chan->user, info->handle,
+       ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
                               NV_DMA_IN_MEMORY, &args, sizeof(args),
                               &ntfy->object);
        client->super = false;
index 4ccf937..7806278 100644
@@ -31,7 +31,7 @@
 #include <linux/swiotlb.h>
 
 #include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
 #include "nouveau_fence.h"
 
 #include "nouveau_bo.h"
@@ -724,360 +724,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
        *pl = nvbo->placement;
 }
 
-
-static int
-nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-       int ret = RING_SPACE(chan, 2);
-       if (ret == 0) {
-               BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-               OUT_RING  (chan, handle & 0x0000ffff);
-               FIRE_RING (chan);
-       }
-       return ret;
-}
-
-static int
-nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       int ret = RING_SPACE(chan, 10);
-       if (ret == 0) {
-               BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-               OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, new_reg->num_pages);
-               BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
-       }
-       return ret;
-}
-
-static int
-nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-       int ret = RING_SPACE(chan, 2);
-       if (ret == 0) {
-               BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-               OUT_RING  (chan, handle);
-       }
-       return ret;
-}
-
-static int
-nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       u64 src_offset = mem->vma[0].addr;
-       u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
-       int ret;
-
-       page_count = new_reg->num_pages;
-       while (page_count) {
-               int line_count = (page_count > 8191) ? 8191 : page_count;
-
-               ret = RING_SPACE(chan, 11);
-               if (ret)
-                       return ret;
-
-               BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
-               OUT_RING  (chan, upper_32_bits(src_offset));
-               OUT_RING  (chan, lower_32_bits(src_offset));
-               OUT_RING  (chan, upper_32_bits(dst_offset));
-               OUT_RING  (chan, lower_32_bits(dst_offset));
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, line_count);
-               BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-               OUT_RING  (chan, 0x00000110);
-
-               page_count -= line_count;
-               src_offset += (PAGE_SIZE * line_count);
-               dst_offset += (PAGE_SIZE * line_count);
-       }
-
-       return 0;
-}
-
-static int
-nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       u64 src_offset = mem->vma[0].addr;
-       u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
-       int ret;
-
-       page_count = new_reg->num_pages;
-       while (page_count) {
-               int line_count = (page_count > 2047) ? 2047 : page_count;
-
-               ret = RING_SPACE(chan, 12);
-               if (ret)
-                       return ret;
-
-               BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
-               OUT_RING  (chan, upper_32_bits(dst_offset));
-               OUT_RING  (chan, lower_32_bits(dst_offset));
-               BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
-               OUT_RING  (chan, upper_32_bits(src_offset));
-               OUT_RING  (chan, lower_32_bits(src_offset));
-               OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-               OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-               OUT_RING  (chan, PAGE_SIZE); /* line_length */
-               OUT_RING  (chan, line_count);
-               BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-               OUT_RING  (chan, 0x00100110);
-
-               page_count -= line_count;
-               src_offset += (PAGE_SIZE * line_count);
-               dst_offset += (PAGE_SIZE * line_count);
-       }
-
-       return 0;
-}
-
-static int
-nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       u64 src_offset = mem->vma[0].addr;
-       u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
-       int ret;
-
-       page_count = new_reg->num_pages;
-       while (page_count) {
-               int line_count = (page_count > 8191) ? 8191 : page_count;
-
-               ret = RING_SPACE(chan, 11);
-               if (ret)
-                       return ret;
-
-               BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-               OUT_RING  (chan, upper_32_bits(src_offset));
-               OUT_RING  (chan, lower_32_bits(src_offset));
-               OUT_RING  (chan, upper_32_bits(dst_offset));
-               OUT_RING  (chan, lower_32_bits(dst_offset));
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, PAGE_SIZE);
-               OUT_RING  (chan, line_count);
-               BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
-               OUT_RING  (chan, 0x00000110);
-
-               page_count -= line_count;
-               src_offset += (PAGE_SIZE * line_count);
-               dst_offset += (PAGE_SIZE * line_count);
-       }
-
-       return 0;
-}
-
-static int
-nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       int ret = RING_SPACE(chan, 7);
-       if (ret == 0) {
-               BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-               OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, 0x00000000 /* COPY */);
-               OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-       }
-       return ret;
-}
-
-static int
-nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       int ret = RING_SPACE(chan, 7);
-       if (ret == 0) {
-               BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
-               OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-               OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
-               OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
-               OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
-       }
-       return ret;
-}
-
-static int
-nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-       int ret = RING_SPACE(chan, 6);
-       if (ret == 0) {
-               BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-               OUT_RING  (chan, handle);
-               BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-               OUT_RING  (chan, chan->drm->ntfy.handle);
-               OUT_RING  (chan, chan->vram.handle);
-               OUT_RING  (chan, chan->vram.handle);
-       }
-
-       return ret;
-}
-
-static int
-nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       struct nouveau_mem *mem = nouveau_mem(old_reg);
-       u64 length = (new_reg->num_pages << PAGE_SHIFT);
-       u64 src_offset = mem->vma[0].addr;
-       u64 dst_offset = mem->vma[1].addr;
-       int src_tiled = !!mem->kind;
-       int dst_tiled = !!nouveau_mem(new_reg)->kind;
-       int ret;
-
-       while (length) {
-               u32 amount, stride, height;
-
-               ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
-               if (ret)
-                       return ret;
-
-               amount  = min(length, (u64)(4 * 1024 * 1024));
-               stride  = 16 * 4;
-               height  = amount / stride;
-
-               if (src_tiled) {
-                       BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, stride);
-                       OUT_RING  (chan, height);
-                       OUT_RING  (chan, 1);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, 0);
-               } else {
-                       BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
-                       OUT_RING  (chan, 1);
-               }
-               if (dst_tiled) {
-                       BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, stride);
-                       OUT_RING  (chan, height);
-                       OUT_RING  (chan, 1);
-                       OUT_RING  (chan, 0);
-                       OUT_RING  (chan, 0);
-               } else {
-                       BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
-                       OUT_RING  (chan, 1);
-               }
-
-               BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
-               OUT_RING  (chan, upper_32_bits(src_offset));
-               OUT_RING  (chan, upper_32_bits(dst_offset));
-               BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-               OUT_RING  (chan, lower_32_bits(src_offset));
-               OUT_RING  (chan, lower_32_bits(dst_offset));
-               OUT_RING  (chan, stride);
-               OUT_RING  (chan, stride);
-               OUT_RING  (chan, stride);
-               OUT_RING  (chan, height);
-               OUT_RING  (chan, 0x00000101);
-               OUT_RING  (chan, 0x00000000);
-               BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-               OUT_RING  (chan, 0);
-
-               length -= amount;
-               src_offset += amount;
-               dst_offset += amount;
-       }
-
-       return 0;
-}
-
-static int
-nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-       int ret = RING_SPACE(chan, 4);
-       if (ret == 0) {
-               BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-               OUT_RING  (chan, handle);
-               BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-               OUT_RING  (chan, chan->drm->ntfy.handle);
-       }
-
-       return ret;
-}
-
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-                     struct nouveau_channel *chan, struct ttm_mem_reg *reg)
-{
-       if (reg->mem_type == TTM_PL_TT)
-               return NvDmaTT;
-       return chan->vram.handle;
-}
-
-static int
-nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-       u32 src_offset = old_reg->start << PAGE_SHIFT;
-       u32 dst_offset = new_reg->start << PAGE_SHIFT;
-       u32 page_count = new_reg->num_pages;
-       int ret;
-
-       ret = RING_SPACE(chan, 3);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
-       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
-
-       page_count = new_reg->num_pages;
-       while (page_count) {
-               int line_count = (page_count > 2047) ? 2047 : page_count;
-
-               ret = RING_SPACE(chan, 11);
-               if (ret)
-                       return ret;
-
-               BEGIN_NV04(chan, NvSubCopy,
-                                NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-               OUT_RING  (chan, src_offset);
-               OUT_RING  (chan, dst_offset);
-               OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-               OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-               OUT_RING  (chan, PAGE_SIZE); /* line_length */
-               OUT_RING  (chan, line_count);
-               OUT_RING  (chan, 0x00000101);
-               OUT_RING  (chan, 0x00000000);
-               BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-               OUT_RING  (chan, 0);
-
-               page_count -= line_count;
-               src_offset += (PAGE_SIZE * line_count);
-               dst_offset += (PAGE_SIZE * line_count);
-       }
-
-       return 0;
-}
-
 static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                     struct ttm_mem_reg *reg)
@@ -1181,7 +827,6 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
-               { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        };
        const struct _method_table *mthd = _methods;
        const char *name = "CPU";
@@ -1197,14 +842,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                if (chan == NULL)
                        continue;
 
-               ret = nvif_object_init(&chan->user,
+               ret = nvif_object_ctor(&chan->user, "ttmBoMove",
                                       mthd->oclass | (mthd->engine << 16),
                                       mthd->oclass, NULL, 0,
                                       &drm->ttm.copy);
                if (ret == 0) {
                        ret = mthd->init(chan, drm->ttm.copy.handle);
                        if (ret) {
-                               nvif_object_fini(&drm->ttm.copy);
+                               nvif_object_dtor(&drm->ttm.copy);
                                continue;
                        }
 
@@ -1461,7 +1106,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
                        /* untiled */
                        break;
-               /* fall through - tiled memory */
+               fallthrough;    /* tiled memory */
        case TTM_PL_VRAM:
                reg->bus.offset = reg->start << PAGE_SHIFT;
                reg->bus.base = device->func->resource_addr(device, 1);
index e944b4a..52489ce 100644
@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NOUVEAU_BO_H__
 #define __NOUVEAU_BO_H__
-
+#include <drm/ttm/ttm_bo_driver.h>
 #include <drm/drm_gem.h>
 
 struct nouveau_channel;
+struct nouveau_cli;
+struct nouveau_drm;
 struct nouveau_fence;
-struct nvkm_vma;
 
 struct nouveau_bo {
        struct ttm_buffer_object bo;
@@ -135,4 +136,42 @@ nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
        }
        return ret;
 }
+
+int nv04_bo_move_init(struct nouveau_channel *, u32);
+int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nv50_bo_move_init(struct nouveau_channel *, u32);
+int nv50_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nv84_bo_move_exec(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nva3_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nvc0_bo_move_init(struct nouveau_channel *, u32);
+int nvc0_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nvc0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nve0_bo_move_init(struct nouveau_channel *, u32);
+int nve0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+                     struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+#define NVBO_WR32_(b,o,dr,f) nouveau_bo_wr32((b), (o)/4 + (dr), (f))
+#define NVBO_RD32_(b,o,dr)   nouveau_bo_rd32((b), (o)/4 + (dr))
+#define NVBO_RD32(A...) DRF_RD(NVBO_RD32_,                  ##A)
+#define NVBO_RV32(A...) DRF_RV(NVBO_RD32_,                  ##A)
+#define NVBO_TV32(A...) DRF_TV(NVBO_RD32_,                  ##A)
+#define NVBO_TD32(A...) DRF_TD(NVBO_RD32_,                  ##A)
+#define NVBO_WR32(A...) DRF_WR(            NVBO_WR32_,      ##A)
+#define NVBO_WV32(A...) DRF_WV(            NVBO_WR32_,      ##A)
+#define NVBO_WD32(A...) DRF_WD(            NVBO_WR32_,      ##A)
+#define NVBO_MR32(A...) DRF_MR(NVBO_RD32_, NVBO_WR32_, u32, ##A)
+#define NVBO_MV32(A...) DRF_MV(NVBO_RD32_, NVBO_WR32_, u32, ##A)
+#define NVBO_MD32(A...) DRF_MD(NVBO_RD32_, NVBO_WR32_, u32, ##A)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
new file mode 100644
index 0000000..bf7ae2c
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_drv.h"
+
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl0039.h>
+
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+                     struct nouveau_channel *chan, struct ttm_mem_reg *reg)
+{
+       if (reg->mem_type == TTM_PL_TT)
+               return NvDmaTT;
+       return chan->vram.handle;
+}
+
+int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nvif_push *push = chan->chan.push;
+       u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
+       u32 src_offset = old_reg->start << PAGE_SHIFT;
+       u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
+       u32 dst_offset = new_reg->start << PAGE_SHIFT;
+       u32 page_count = new_reg->num_pages;
+       int ret;
+
+       ret = PUSH_WAIT(push, 3);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
+                              SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
+
+       page_count = new_reg->num_pages;
+       while (page_count) {
+               int line_count = (page_count > 2047) ? 2047 : page_count;
+
+               ret = PUSH_WAIT(push, 11);
+               if (ret)
+                       return ret;
+
+               PUSH_MTHD(push, NV039, OFFSET_IN, src_offset,
+                                      OFFSET_OUT, dst_offset,
+                                      PITCH_IN, PAGE_SIZE,
+                                      PITCH_OUT, PAGE_SIZE,
+                                      LINE_LENGTH_IN, PAGE_SIZE,
+                                      LINE_COUNT, line_count,
+
+                                      FORMAT,
+                         NVVAL(NV039, FORMAT, IN, 1) |
+                         NVVAL(NV039, FORMAT, OUT, 1),
+
+                                      BUFFER_NOTIFY, NV039_BUFFER_NOTIFY_WRITE_ONLY);
+
+               PUSH_MTHD(push, NV039, NO_OPERATION, 0x00000000);
+
+               page_count -= line_count;
+               src_offset += (PAGE_SIZE * line_count);
+               dst_offset += (PAGE_SIZE * line_count);
+       }
+
+       return 0;
+}
+
+int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 4);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV039, SET_OBJECT, handle);
+       PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
new file mode 100644
index 0000000..f9b9b85
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_drv.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl5039.h>
+
+int
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       struct nvif_push *push = chan->chan.push;
+       u64 length = (new_reg->num_pages << PAGE_SHIFT);
+       u64 src_offset = mem->vma[0].addr;
+       u64 dst_offset = mem->vma[1].addr;
+       int src_tiled = !!mem->kind;
+       int dst_tiled = !!nouveau_mem(new_reg)->kind;
+       int ret;
+
+       while (length) {
+               u32 amount, stride, height;
+
+               ret = PUSH_WAIT(push, 18 + 6 * (src_tiled + dst_tiled));
+               if (ret)
+                       return ret;
+
+               amount  = min(length, (u64)(4 * 1024 * 1024));
+               stride  = 16 * 4;
+               height  = amount / stride;
+
+               if (src_tiled) {
+                       PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
+                                 NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, BLOCKLINEAR),
+
+                                               SET_SRC_BLOCK_SIZE,
+                                 NVDEF(NV5039, SET_SRC_BLOCK_SIZE, WIDTH, ONE_GOB) |
+                                 NVDEF(NV5039, SET_SRC_BLOCK_SIZE, HEIGHT, ONE_GOB) |
+                                 NVDEF(NV5039, SET_SRC_BLOCK_SIZE, DEPTH, ONE_GOB),
+
+                                               SET_SRC_WIDTH, stride,
+                                               SET_SRC_HEIGHT, height,
+                                               SET_SRC_DEPTH, 1,
+                                               SET_SRC_LAYER, 0,
+
+                                               SET_SRC_ORIGIN,
+                                 NVVAL(NV5039, SET_SRC_ORIGIN, X, 0) |
+                                 NVVAL(NV5039, SET_SRC_ORIGIN, Y, 0));
+               } else {
+                       PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
+                                 NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+               }
+
+               if (dst_tiled) {
+                       PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
+                                 NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, BLOCKLINEAR),
+
+                                               SET_DST_BLOCK_SIZE,
+                                 NVDEF(NV5039, SET_DST_BLOCK_SIZE, WIDTH, ONE_GOB) |
+                                 NVDEF(NV5039, SET_DST_BLOCK_SIZE, HEIGHT, ONE_GOB) |
+                                 NVDEF(NV5039, SET_DST_BLOCK_SIZE, DEPTH, ONE_GOB),
+
+                                               SET_DST_WIDTH, stride,
+                                               SET_DST_HEIGHT, height,
+                                               SET_DST_DEPTH, 1,
+                                               SET_DST_LAYER, 0,
+
+                                               SET_DST_ORIGIN,
+                                 NVVAL(NV5039, SET_DST_ORIGIN, X, 0) |
+                                 NVVAL(NV5039, SET_DST_ORIGIN, Y, 0));
+               } else {
+                       PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
+                                 NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, PITCH));
+               }
+
+               PUSH_MTHD(push, NV5039, OFFSET_IN_UPPER,
+                         NVVAL(NV5039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
+
+                                       OFFSET_OUT_UPPER,
+                         NVVAL(NV5039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)));
+
+               PUSH_MTHD(push, NV5039, OFFSET_IN, lower_32_bits(src_offset),
+                                       OFFSET_OUT, lower_32_bits(dst_offset),
+                                       PITCH_IN, stride,
+                                       PITCH_OUT, stride,
+                                       LINE_LENGTH_IN, stride,
+                                       LINE_COUNT, height,
+
+                                       FORMAT,
+                         NVDEF(NV5039, FORMAT, IN, ONE) |
+                         NVDEF(NV5039, FORMAT, OUT, ONE),
+
+                                       BUFFER_NOTIFY,
+                         NVDEF(NV5039, BUFFER_NOTIFY, TYPE, WRITE_ONLY));
+
+               PUSH_MTHD(push, NV5039, NO_OPERATION, 0x00000000);
+
+               length -= amount;
+               src_offset += amount;
+               dst_offset += amount;
+       }
+
+       return 0;
+}
+
+int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 6);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
+       PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
+                               SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
+                               SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
+       return 0;
+}
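
Note: the converted NV5039 path above builds every method word from NVDEF()/NVVAL() field helpers in the generated nvhw class headers instead of hand-written hex constants. A minimal userspace sketch of the packing idea follows; the field positions here are invented for illustration and are not the real cl5039.h layout.

/* Sketch only: models how per-field helpers OR together into one 32-bit
 * method word.  Shifts/masks below are hypothetical; the real layouts come
 * from the generated nvhw/class headers. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ORIGIN_X(v)  (((uint32_t)(v) & 0xffff) << 0)   /* hypothetical bits 15:0  */
#define DEMO_ORIGIN_Y(v)  (((uint32_t)(v) & 0xffff) << 16)  /* hypothetical bits 31:16 */

int main(void)
{
	/* pack two fields into one SET_*_ORIGIN-style word */
	uint32_t origin = DEMO_ORIGIN_X(32) | DEMO_ORIGIN_Y(8);

	printf("origin word = 0x%08x\n", origin);   /* prints 0x00080020 */
	return 0;
}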
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
new file mode 100644 (file)
index 0000000..1b5fd78
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 7);
+       if (ret)
+               return ret;
+
+       PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+                               0x0308, upper_32_bits(mem->vma[0].addr),
+                               0x030c, lower_32_bits(mem->vma[0].addr),
+                               0x0310, upper_32_bits(mem->vma[1].addr),
+                               0x0314, lower_32_bits(mem->vma[1].addr),
+                               0x0318, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
new file mode 100644 (file)
index 0000000..f0df172
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
+ *     code with KeplerDmaCopyA.
+ */
+
+int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       struct nvif_push *push = chan->chan.push;
+       u64 src_offset = mem->vma[0].addr;
+       u64 dst_offset = mem->vma[1].addr;
+       u32 page_count = new_reg->num_pages;
+       int ret;
+
+       page_count = new_reg->num_pages;
+       while (page_count) {
+               int line_count = (page_count > 8191) ? 8191 : page_count;
+
+               ret = PUSH_WAIT(push, 11);
+               if (ret)
+                       return ret;
+
+               PUSH_NVSQ(push, NV85B5, 0x030c, upper_32_bits(src_offset),
+                                       0x0310, lower_32_bits(src_offset),
+                                       0x0314, upper_32_bits(dst_offset),
+                                       0x0318, lower_32_bits(dst_offset),
+                                       0x031c, PAGE_SIZE,
+                                       0x0320, PAGE_SIZE,
+                                       0x0324, PAGE_SIZE,
+                                       0x0328, line_count);
+               PUSH_NVSQ(push, NV85B5, 0x0300, 0x00000110);
+
+               page_count -= line_count;
+               src_offset += (PAGE_SIZE * line_count);
+               dst_offset += (PAGE_SIZE * line_count);
+       }
+
+       return 0;
+}
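
Note: the loop above splits a buffer move into copy-engine submissions of at most 8191 lines of PAGE_SIZE bytes each (the NV9039 M2MF path added below uses a 2047-line cap). A standalone sketch of the same chunking arithmetic, assuming 4 KiB pages:

/* Sketch of the chunking done by nva3_bo_move_copy(): split page_count
 * pages into submissions of at most 8191 lines, each line PAGE_SIZE bytes,
 * advancing the source/destination offsets exactly as the loop above does. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u     /* assumption: 4 KiB pages */
#define DEMO_MAX_LINES 8191u     /* per-submission line limit used above */

int main(void)
{
	uint32_t page_count = 20000;        /* example: a 20000-page buffer */
	uint64_t src = 0, dst = 0;

	while (page_count) {
		uint32_t lines = page_count > DEMO_MAX_LINES ? DEMO_MAX_LINES : page_count;

		printf("copy %5u pages: src +0x%llx dst +0x%llx\n", lines,
		       (unsigned long long)src, (unsigned long long)dst);

		page_count -= lines;
		src += (uint64_t)DEMO_PAGE_SIZE * lines;
		dst += (uint64_t)DEMO_PAGE_SIZE * lines;
	}
	return 0;
}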
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
new file mode 100644 (file)
index 0000000..52fefb3
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl9039.h>
+
+int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nvif_push *push = chan->chan.push;
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       u64 src_offset = mem->vma[0].addr;
+       u64 dst_offset = mem->vma[1].addr;
+       u32 page_count = new_reg->num_pages;
+       int ret;
+
+       page_count = new_reg->num_pages;
+       while (page_count) {
+               int line_count = (page_count > 2047) ? 2047 : page_count;
+
+               ret = PUSH_WAIT(push, 12);
+               if (ret)
+                       return ret;
+
+               PUSH_MTHD(push, NV9039, OFFSET_OUT_UPPER,
+                         NVVAL(NV9039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)),
+
+                                       OFFSET_OUT, lower_32_bits(dst_offset));
+
+               PUSH_MTHD(push, NV9039, OFFSET_IN_UPPER,
+                         NVVAL(NV9039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
+
+                                       OFFSET_IN, lower_32_bits(src_offset),
+                                       PITCH_IN, PAGE_SIZE,
+                                       PITCH_OUT, PAGE_SIZE,
+                                       LINE_LENGTH_IN, PAGE_SIZE,
+                                       LINE_COUNT, line_count);
+
+               PUSH_MTHD(push, NV9039, LAUNCH_DMA,
+                         NVDEF(NV9039, LAUNCH_DMA, SRC_INLINE, FALSE) |
+                         NVDEF(NV9039, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+                         NVDEF(NV9039, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+                         NVDEF(NV9039, LAUNCH_DMA, COMPLETION_TYPE, FLUSH_DISABLE) |
+                         NVDEF(NV9039, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+                         NVDEF(NV9039, LAUNCH_DMA, SEMAPHORE_STRUCT_SIZE, ONE_WORD));
+
+               page_count -= line_count;
+               src_offset += (PAGE_SIZE * line_count);
+               dst_offset += (PAGE_SIZE * line_count);
+       }
+
+       return 0;
+}
+
+int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 2);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV9039, SET_OBJECT, handle);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
new file mode 100644 (file)
index 0000000..34b79d5
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
+ *     code with KeplerDmaCopyA.
+ */
+
+int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       struct nvif_push *push = chan->chan.push;
+       u64 src_offset = mem->vma[0].addr;
+       u64 dst_offset = mem->vma[1].addr;
+       u32 page_count = new_reg->num_pages;
+       int ret;
+
+       page_count = new_reg->num_pages;
+       while (page_count) {
+               int line_count = (page_count > 8191) ? 8191 : page_count;
+
+               ret = PUSH_WAIT(push, 10);
+               if (ret)
+                       return ret;
+
+               PUSH_NVSQ(push, NV90B5, 0x030c, upper_32_bits(src_offset),
+                                       0x0310, lower_32_bits(src_offset),
+                                       0x0314, upper_32_bits(dst_offset),
+                                       0x0318, lower_32_bits(dst_offset),
+                                       0x031c, PAGE_SIZE,
+                                       0x0320, PAGE_SIZE,
+                                       0x0324, PAGE_SIZE,
+                                       0x0328, line_count);
+               PUSH_NVIM(push, NV90B5, 0x0300, 0x0110);
+
+               page_count -= line_count;
+               src_offset += (PAGE_SIZE * line_count);
+               dst_offset += (PAGE_SIZE * line_count);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
new file mode 100644 (file)
index 0000000..394e290
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *         Ben Skeggs   <darktama@iinet.net.au>
+ *         Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cla0b5.h>
+
+int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+       struct nouveau_mem *mem = nouveau_mem(old_reg);
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 10);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
+                 NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(mem->vma[0].addr)),
+
+                               OFFSET_IN_LOWER, lower_32_bits(mem->vma[0].addr),
+
+                               OFFSET_OUT_UPPER,
+                 NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(mem->vma[1].addr)),
+
+                               OFFSET_OUT_LOWER, lower_32_bits(mem->vma[1].addr),
+                               PITCH_IN, PAGE_SIZE,
+                               PITCH_OUT, PAGE_SIZE,
+                               LINE_LENGTH_IN, PAGE_SIZE,
+                               LINE_COUNT, new_reg->num_pages);
+
+       PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
+                 NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, VIRTUAL) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, VIRTUAL));
+       return 0;
+}
+
+int
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+       struct nvif_push *push = chan->chan.push;
+       int ret;
+
+       ret = PUSH_WAIT(push, 2);
+       if (ret)
+               return ret;
+
+       PUSH_NVSQ(push, NVA0B5, 0x0000, handle & 0x0000ffff);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3d71dfc..b80e4eb 100644 (file)
@@ -21,8 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
+#include <nvif/push006c.h>
 
-#include <nvif/os.h>
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 #include <nvif/cl006b.h>
@@ -32,9 +32,6 @@
 #include <nvif/clc36f.h>
 #include <nvif/ioctl.h>
 
-/*XXX*/
-#include <core/client.h>
-
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_bo.h"
@@ -102,12 +99,12 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                if (cli)
                        nouveau_svmm_part(chan->vmm->svmm, chan->inst);
 
-               nvif_object_fini(&chan->nvsw);
-               nvif_object_fini(&chan->gart);
-               nvif_object_fini(&chan->vram);
-               nvif_notify_fini(&chan->kill);
-               nvif_object_fini(&chan->user);
-               nvif_object_fini(&chan->push.ctxdma);
+               nvif_object_dtor(&chan->nvsw);
+               nvif_object_dtor(&chan->gart);
+               nvif_object_dtor(&chan->vram);
+               nvif_notify_dtor(&chan->kill);
+               nvif_object_dtor(&chan->user);
+               nvif_object_dtor(&chan->push.ctxdma);
                nouveau_vma_del(&chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
                if (chan->push.buffer && chan->push.buffer->pin_refcnt)
@@ -121,6 +118,31 @@ nouveau_channel_del(struct nouveau_channel **pchan)
        *pchan = NULL;
 }
 
+static void
+nouveau_channel_kick(struct nvif_push *push)
+{
+       struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+       chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+       FIRE_RING(chan);
+       chan->chan._push.bgn = chan->chan._push.cur;
+}
+
+static int
+nouveau_channel_wait(struct nvif_push *push, u32 size)
+{
+       struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+       int ret;
+       chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+       ret = RING_SPACE(chan, size);
+       if (ret == 0) {
+               chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
+               chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
+               chan->chan._push.cur = chan->chan._push.bgn;
+               chan->chan._push.end = chan->chan._push.bgn + size;
+       }
+       return ret;
+}
+
 static int
 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                     u32 size, struct nouveau_channel **pchan)
@@ -158,6 +180,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                return ret;
        }
 
+       chan->chan._push.mem.object.parent = cli->base.object.parent;
+       chan->chan._push.mem.object.client = &cli->base;
+       chan->chan._push.mem.object.name = "chanPush";
+       chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
+       chan->chan._push.wait = nouveau_channel_wait;
+       chan->chan._push.kick = nouveau_channel_kick;
+       chan->chan.push = &chan->chan._push;
+
        /* create dma object covering the *entire* memory space that the
         * pushbuf lives in, this is because the GEM code requires that
         * we be able to call out to other (indirect) push buffers
@@ -214,8 +244,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                }
        }
 
-       ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
-                              &args, sizeof(args), &chan->push.ctxdma);
+       ret = nvif_object_ctor(&device->object, "abi16PushCtxDma", 0,
+                              NV_DMA_FROM_MEMORY, &args, sizeof(args),
+                              &chan->push.ctxdma);
        if (ret) {
                nouveau_channel_del(pchan);
                return ret;
@@ -290,8 +321,8 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                        size = sizeof(args.nv50);
                }
 
-               ret = nvif_object_init(&device->object, 0, *oclass++,
-                                      &args, size, &chan->user);
+               ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0,
+                                      *oclass++, &args, size, &chan->user);
                if (ret == 0) {
                        if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
                                chan->chid = args.volta.chid;
@@ -341,8 +372,9 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
        args.offset = chan->push.addr;
 
        do {
-               ret = nvif_object_init(&device->object, 0, *oclass++,
-                                      &args, sizeof(args), &chan->user);
+               ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0,
+                                      *oclass++, &args, sizeof(args),
+                                      &chan->user);
                if (ret == 0) {
                        chan->chid = args.chid;
                        return ret;
@@ -364,7 +396,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        nvif_object_map(&chan->user, NULL, 0);
 
        if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
-               ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
+               ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
+                                      nouveau_channel_killed,
                                       true, NV906F_V0_NTFY_KILLED,
                                       NULL, 0, 0, &chan->kill);
                if (ret == 0)
@@ -390,8 +423,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
                        args.limit = device->info.ram_user - 1;
                }
 
-               ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
-                                      &args, sizeof(args), &chan->vram);
+               ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
+                                      NV_DMA_IN_MEMORY, &args, sizeof(args),
+                                      &chan->vram);
                if (ret)
                        return ret;
 
@@ -414,8 +448,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
                        args.limit = chan->vmm->vmm.limit - 1;
                }
 
-               ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
-                                      &args, sizeof(args), &chan->gart);
+               ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
+                                      NV_DMA_IN_MEMORY, &args, sizeof(args),
+                                      &chan->gart);
                if (ret)
                        return ret;
        }
@@ -444,28 +479,27 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        chan->dma.cur = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;
 
-       ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+       ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;
 
        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-               OUT_RING(chan, 0x00000000);
+               PUSH_DATA(chan->chan.push, 0x00000000);
 
        /* allocate software object class (used for fences on <= nv05) */
        if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
-               ret = nvif_object_init(&chan->user, 0x006e,
+               ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
                                       NVIF_CLASS_SW_NV04,
                                       NULL, 0, &chan->nvsw);
                if (ret)
                        return ret;
 
-               ret = RING_SPACE(chan, 2);
+               ret = PUSH_WAIT(chan->chan.push, 2);
                if (ret)
                        return ret;
 
-               BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
-               OUT_RING  (chan, chan->nvsw.handle);
-               FIRE_RING (chan);
+               PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
+               PUSH_KICK(chan->chan.push);
        }
 
        /* initialise synchronisation */
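
Note: the new nouveau_channel_wait()/nouveau_channel_kick() callbacks let the generic PUSH_* macros drive the legacy software ring: wait reserves space via RING_SPACE() and points the push cursor at the mapped pushbuf, kick folds the written words back into dma.cur and fires the ring. A rough userspace model of that contract is sketched below; the struct and values are illustrative stand-ins, not the real struct nvif_push from nvif/push.h.

/* Illustrative model of the push wait/kick contract used above. */
#include <stdint.h>
#include <stdio.h>

struct demo_push {
	uint32_t *bgn, *cur, *end;                    /* window handed out by wait() */
	int   (*wait)(struct demo_push *, uint32_t dwords);
	void  (*kick)(struct demo_push *);
	uint32_t ring[64];                            /* stand-in for the pushbuf BO */
	uint32_t ring_cur;                            /* stand-in for chan->dma.cur  */
};

static int demo_wait(struct demo_push *p, uint32_t dwords)
{
	p->ring_cur += (uint32_t)(p->cur - p->bgn);   /* account words already written */
	if (p->ring_cur + dwords > 64)
		return -1;                            /* out of space (real code wraps/waits) */
	p->bgn = p->cur = &p->ring[p->ring_cur];
	p->end = p->bgn + dwords;
	return 0;
}

static void demo_kick(struct demo_push *p)
{
	p->ring_cur += (uint32_t)(p->cur - p->bgn);   /* fold into the ring position... */
	printf("kick: ring now at %u dwords\n", p->ring_cur);  /* ...then FIRE_RING */
	p->bgn = p->cur;
}

int main(void)
{
	struct demo_push p = { .wait = demo_wait, .kick = demo_kick };

	p.bgn = p.cur = p.end = p.ring;

	if (p.wait(&p, 2) == 0) {           /* like PUSH_WAIT(push, 2) */
		*p.cur++ = 0x00042000;      /* method header (made-up value) */
		*p.cur++ = 0xdeadbeef;      /* data word */
		p.kick(&p);                 /* like PUSH_KICK(push) */
	}
	return 0;
}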
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 9307357..98ba9d2 100644 (file)
@@ -3,9 +3,15 @@
 #define __NOUVEAU_CHAN_H__
 #include <nvif/object.h>
 #include <nvif/notify.h>
+#include <nvif/push.h>
 struct nvif_device;
 
 struct nouveau_channel {
+       struct {
+               struct nvif_push _push;
+               struct nvif_push *push;
+       } chan;
+
        struct nvif_device *device;
        struct nouveau_drm *drm;
        struct nouveau_vmm *vmm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ab2c2b2..7674025 100644 (file)
@@ -330,7 +330,7 @@ nouveau_conn_attach_properties(struct drm_connector *connector)
        case DRM_MODE_CONNECTOR_VGA:
                if (disp->disp.object.oclass < NV50_DISP)
                        break; /* Can only scale on DFPs. */
-               /* Fall-through. */
+               fallthrough;
        default:
                drm_object_attach_property(&connector->base, dev->mode_config.
                                           scaling_mode_property,
@@ -409,7 +409,7 @@ static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
-       nvif_notify_fini(&nv_connector->hpd);
+       nvif_notify_dtor(&nv_connector->hpd);
        kfree(nv_connector->edid);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
@@ -445,7 +445,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
                case DCB_OUTPUT_LVDS:
                        switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
                                            VGA_SWITCHEROO_CAN_SWITCH_DDC);
-               /* fall-through */
+                       fallthrough;
                default:
                        if (!nv_encoder->i2c)
                                break;
@@ -571,8 +571,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                pm_runtime_get_noresume(dev->dev);
        } else {
                ret = pm_runtime_get_sync(dev->dev);
-               if (ret < 0 && ret != -EACCES)
+               if (ret < 0 && ret != -EACCES) {
+                       pm_runtime_put_autosuspend(dev->dev);
                        return conn_status;
+               }
        }
 
        nv_encoder = nouveau_connector_ddc_detect(connector);
@@ -1448,7 +1450,8 @@ nouveau_connector_create(struct drm_device *dev,
                break;
        }
 
-       ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
+       ret = nvif_notify_ctor(&disp->disp.object, "kmsHotplug",
+                              nouveau_connector_hotplug,
                               true, NV04_DISP_NTFY_CONN,
                               &(struct nvif_notify_conn_req_v0) {
                                .mask = NVIF_NOTIFY_CONN_V0_ANY,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 9e062c7..d6de5cb 100644 (file)
 
 #include <nvif/notify.h>
 
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl907d.h>
+#include <nvhw/drf.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
@@ -56,16 +60,23 @@ struct nouveau_conn_atom {
                 * hw values, and the code relies on this.
                 */
                enum {
-                       DITHERING_MODE_OFF = 0x00,
-                       DITHERING_MODE_ON = 0x01,
-                       DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
-                       DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
-                       DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+                       DITHERING_MODE_OFF =
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, DISABLE),
+                       DITHERING_MODE_ON =
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, ENABLE),
+                       DITHERING_MODE_DYNAMIC2X2 = DITHERING_MODE_ON |
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, MODE, DYNAMIC_2X2),
+                       DITHERING_MODE_STATIC2X2 = DITHERING_MODE_ON |
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, MODE, STATIC_2X2),
+                       DITHERING_MODE_TEMPORAL = DITHERING_MODE_ON |
+                               NVDEF(NV907D, HEAD_SET_DITHER_CONTROL, MODE, TEMPORAL),
                        DITHERING_MODE_AUTO
                } mode;
                enum {
-                       DITHERING_DEPTH_6BPC = 0x00,
-                       DITHERING_DEPTH_8BPC = 0x02,
+                       DITHERING_DEPTH_6BPC =
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, BITS, DITHER_TO_6_BITS),
+                       DITHERING_DEPTH_8BPC =
+                               NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, BITS, DITHER_TO_8_BITS),
                        DITHERING_DEPTH_AUTO
                } depth;
        } dither;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 63b5c8c..c2bc05e 100644 (file)
@@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
        int ret;
 
        ret = pm_runtime_get_sync(drm->dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(drm->dev->dev);
                return ret;
+       }
 
        seq_printf(m, "0x%08x\n",
                   nvif_rd32(&drm->client.device.object, 0x101000));
@@ -258,7 +260,7 @@ nouveau_debugfs_init(struct nouveau_drm *drm)
        if (!drm->debugfs)
                return -ENOMEM;
 
-       ret = nvif_object_init(&drm->client.device.object, 0,
+       ret = nvif_object_ctor(&drm->client.device.object, "debugfsCtrl", 0,
                               NVIF_CLASS_CONTROL, NULL, 0,
                               &drm->debugfs->ctrl);
        if (ret)
@@ -271,7 +273,7 @@ void
 nouveau_debugfs_fini(struct nouveau_drm *drm)
 {
        if (drm->debugfs && drm->debugfs->ctrl.priv)
-               nvif_object_fini(&drm->debugfs->ctrl);
+               nvif_object_dtor(&drm->debugfs->ctrl);
 
        kfree(drm->debugfs);
        drm->debugfs = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 657554c..63c58f1 100644 (file)
@@ -635,7 +635,8 @@ nouveau_display_create(struct drm_device *dev)
        drm_kms_helper_poll_disable(dev);
 
        if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
-               ret = nvif_disp_ctor(&drm->client.device, 0, &disp->disp);
+               ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
+                                    &disp->disp);
                if (ret == 0) {
                        nouveau_display_create_properties(dev);
                        if (disp->disp.object.oclass < NV50_DISP)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 3c430a5..ddb75d8 100644 (file)
 
 #include <nvif/user.h>
 
-void
-OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
-{
-       bool is_iomem;
-       u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
-       mem = &mem[chan->dma.cur];
-       if (is_iomem)
-               memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
-       else
-               memcpy(mem, data, nr_dwords * 4);
-       chan->dma.cur += nr_dwords;
-}
-
 /* Fetch and adjust GPU GET pointer
  *
  * Returns:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index fc5e3f4..035a709 100644 (file)
@@ -45,17 +45,6 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
  */
 #define NOUVEAU_DMA_SKIPS (128 / 4)
 
-/* Hardcoded object assignments to subchannels (subchannel id). */
-enum {
-       NvSubCtxSurf2D  = 0,
-       NvSubSw         = 1,
-       NvSubImageBlit  = 2,
-       NvSubGdiRect    = 3,
-
-       NvSub2D         = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
-       NvSubCopy       = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
-};
-
 /* Object handles - for stuff that's doesn't use handle == oclass. */
 enum {
        NvDmaFB         = 0x80000002,
@@ -66,23 +55,6 @@ enum {
        NvEvoSema1      = 0x80000011,
 };
 
-#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
-#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
-#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
-#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
-#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
-#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
-
-#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
-#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
-#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
-#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
-#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
-
 static __must_check inline int
 RING_SPACE(struct nouveau_channel *chan, int size)
 {
@@ -102,39 +74,6 @@ OUT_RING(struct nouveau_channel *chan, int data)
        nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
 }
 
-extern void
-OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
-
-static inline void
-BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
-       OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
-}
-
-static inline void
-BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
-       OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
-}
-
-static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
-       OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
-}
-
-static inline void
-BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
-       OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
-}
-
-static inline void
-BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
-{
-       OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
-}
-
 #define WRITE_PUT(val) do {                                                    \
        mb();                                                   \
        nouveau_bo_rd32(chan->push.buffer, 0);                                 \
@@ -164,25 +103,6 @@ WIND_RING(struct nouveau_channel *chan)
        chan->dma.cur = chan->dma.put;
 }
 
-/* FIFO methods */
-#define NV01_SUBCHAN_OBJECT                                          0x00000000
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH                          0x00000010
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW                           0x00000014
-#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE                              0x00000018
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER                               0x0000001c
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL                 0x00000001
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG                    0x00000002
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL                0x00000004
-#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD                         0x00001000
-#define NV84_SUBCHAN_UEVENT                                          0x00000020
-#define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
-#define NV10_SUBCHAN_REF_CNT                                         0x00000050
-#define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
-#define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
-#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
-#define NV11_SUBCHAN_SEMAPHORE_RELEASE                               0x0000006c
-#define NV40_SUBCHAN_YIELD                                           0x00000080
-
 /* NV_SW object class */
 #define NV_SW_DMA_VBLSEM                                             0x0000018c
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
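
Note: the BEGIN_NV04()/BEGIN_NVC0() helpers deleted above encoded the subchannel, method and word count into a single header dword; the PUSH_* macros now emit equivalent headers internally. For reference, a standalone sketch of the two encodings exactly as the removed inlines built them:

/* Header encodings taken from the removed BEGIN_NV04()/BEGIN_NVC0() inlines. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hdr_nv04(int subc, int mthd, int size)
{
	return 0x00000000 | (subc << 13) | (size << 18) | mthd;
}

static uint32_t hdr_nvc0(int subc, int mthd, int size)
{
	return 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2);
}

int main(void)
{
	/* e.g. one data word to method 0x0000 on subchannel 1 (the old NvSubSw) */
	printf("nv04 header: 0x%08x\n", hdr_nv04(1, 0x0000, 1));
	printf("nvc0 header: 0x%08x\n", hdr_nvc0(1, 0x0000, 1));
	return 0;
}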
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index af87067..98a1739 100644 (file)
 
 #include <nvif/class.h>
 #include <nvif/object.h>
+#include <nvif/push906f.h>
 #include <nvif/if000c.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
 #include <nvif/if000c.h>
 
+#include <nvhw/class/cla0b5.h>
+
 #include <linux/sched/mm.h>
 #include <linux/hmm.h>
 
@@ -385,57 +388,72 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
 {
-       struct nouveau_channel *chan = drm->dmem->migrate.chan;
-       u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
-                        (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
-                        (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
-                        (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
-                        (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+       struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+       u32 launch_dma = 0;
        int ret;
 
-       ret = RING_SPACE(chan, 13);
+       ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;
 
        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
-                       BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
+                       PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
+                                 NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
-                       BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
+                       PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
+                                 NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }
-               launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
+
+               launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
        }
 
        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
-                       BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+                       PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+                                 NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
-                       BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+                       PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+                                 NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }
-               launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+               launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
        }
 
-       BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-       OUT_RING  (chan, upper_32_bits(src_addr));
-       OUT_RING  (chan, lower_32_bits(src_addr));
-       OUT_RING  (chan, upper_32_bits(dst_addr));
-       OUT_RING  (chan, lower_32_bits(dst_addr));
-       OUT_RING  (chan, PAGE_SIZE);
-       OUT_RING  (chan, PAGE_SIZE);
-       OUT_RING  (chan, PAGE_SIZE);
-       OUT_RING  (chan, npages);
-       BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-       OUT_RING  (chan, launch_dma);
+       PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
+                 NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),
+
+                               OFFSET_IN_LOWER, lower_32_bits(src_addr),
+
+                               OFFSET_OUT_UPPER,
+                 NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
+
+                               OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
+                               PITCH_IN, PAGE_SIZE,
+                               PITCH_OUT, PAGE_SIZE,
+                               LINE_LENGTH_IN, PAGE_SIZE,
+                               LINE_COUNT, npages);
+
+       PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
 }
 
@@ -443,45 +461,55 @@ static int
 nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
 {
-       struct nouveau_channel *chan = drm->dmem->migrate.chan;
-       u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
-                        (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
-                        (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
-                        (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
-                        (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
-       u32 remap = (4 <<  0) /* DST_X_CONST_A */ |
-                   (5 <<  4) /* DST_Y_CONST_B */ |
-                   (3 << 16) /* COMPONENT_SIZE_FOUR */ |
-                   (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+       struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+       u32 launch_dma = 0;
        int ret;
 
-       ret = RING_SPACE(chan, 12);
+       ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;
 
        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
-               BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
-                       break;
+               PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+                         NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
+               break;
        case NOUVEAU_APER_HOST:
-               BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+               PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+                         NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                break;
        default:
                return -EINVAL;
        }
-       launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
-
-       BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, remap);
-       BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
-       OUT_RING(chan, upper_32_bits(dst_addr));
-       OUT_RING(chan, lower_32_bits(dst_addr));
-       BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
-       OUT_RING(chan, length >> 3);
-       BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-       OUT_RING(chan, launch_dma);
+
+       launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
+
+       PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
+                               SET_REMAP_CONST_B, 0,
+
+                               SET_REMAP_COMPONENTS,
+                 NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
+                 NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
+                 NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
+                 NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));
+
+       PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
+                 NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
+
+                               OFFSET_OUT_LOWER, lower_32_bits(dst_addr));
+
+       PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);
+
+       PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
+                 NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
 }
 
@@ -550,7 +578,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
-               if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+               if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
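
Note: as I read nvc0b5_migrate_clear() above, the copy engine's remap unit synthesizes each output element from two 4-byte constants (CONST_A/CONST_B), so every element written is 8 bytes and the transfer length is programmed as length >> 3. A small worked check of that arithmetic:

/* Worked example of the remap sizing used above: NUM_DST_COMPONENTS = TWO
 * and COMPONENT_SIZE = FOUR give 8 bytes per remapped element, so a clear
 * of 'length' bytes is submitted as length >> 3 elements. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t length = 1u << 20;              /* example: clear 1 MiB */
	uint32_t bytes_per_elem = 2 * 4;         /* two components, four bytes each */
	uint32_t elems = length / bytes_per_elem;

	printf("%u bytes -> %u elements (length >> 3 = %u)\n",
	       length, elems, length >> 3);
	return 0;
}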
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ac93d12..22d246a 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <nvif/driver.h>
 #include <nvif/fifo.h>
+#include <nvif/push006c.h>
 #include <nvif/user.h>
 
 #include <nvif/class.h>
@@ -178,10 +179,10 @@ nouveau_cli_fini(struct nouveau_cli *cli)
        usif_client_fini(cli);
        nouveau_vmm_fini(&cli->svm);
        nouveau_vmm_fini(&cli->vmm);
-       nvif_mmu_fini(&cli->mmu);
-       nvif_device_fini(&cli->device);
+       nvif_mmu_dtor(&cli->mmu);
+       nvif_device_dtor(&cli->device);
        mutex_lock(&cli->drm->master.lock);
-       nvif_client_fini(&cli->base);
+       nvif_client_dtor(&cli->base);
        mutex_unlock(&cli->drm->master.lock);
 }
 
@@ -229,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                                       cli->name, device, &cli->base);
        } else {
                mutex_lock(&drm->master.lock);
-               ret = nvif_client_init(&drm->master.base, cli->name, device,
+               ret = nvif_client_ctor(&drm->master.base, cli->name, device,
                                       &cli->base);
                mutex_unlock(&drm->master.lock);
        }
@@ -238,7 +239,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                goto done;
        }
 
-       ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
+       ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
                               &(struct nv_device_v0) {
                                        .device = ~0,
                               }, sizeof(struct nv_device_v0),
@@ -254,7 +255,8 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                goto done;
        }
 
-       ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+       ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+                           &cli->mmu);
        if (ret) {
                NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
                goto done;
@@ -290,7 +292,7 @@ static void
 nouveau_accel_ce_fini(struct nouveau_drm *drm)
 {
        nouveau_channel_idle(drm->cechan);
-       nvif_object_fini(&drm->ttm.copy);
+       nvif_object_dtor(&drm->ttm.copy);
        nouveau_channel_del(&drm->cechan);
 }
 
@@ -328,9 +330,8 @@ static void
 nouveau_accel_gr_fini(struct nouveau_drm *drm)
 {
        nouveau_channel_idle(drm->channel);
-       nvif_object_fini(&drm->ntfy);
+       nvif_object_dtor(&drm->ntfy);
        nvkm_gpuobj_del(&drm->notify);
-       nvif_object_fini(&drm->nvsw);
        nouveau_channel_del(&drm->channel);
 }
 
@@ -362,16 +363,15 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
         * synchronisation of page flips, as well as to implement fences
         * on TNT/TNT2 HW that lacks any kind of support in host.
         */
-       if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-               ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
-                                      nouveau_abi16_swclass(drm), NULL, 0,
-                                      &drm->nvsw);
+       if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
+               ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
+                                      NVDRM_NVSW, nouveau_abi16_swclass(drm),
+                                      NULL, 0, &drm->channel->nvsw);
                if (ret == 0) {
-                       ret = RING_SPACE(drm->channel, 2);
-                       if (ret == 0) {
-                               BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
-                               OUT_RING  (drm->channel, drm->nvsw.handle);
-                       }
+                       struct nvif_push *push = drm->channel->chan.push;
+                       ret = PUSH_WAIT(push, 2);
+                       if (ret == 0)
+                               PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
                }
 
                if (ret) {
@@ -394,8 +394,8 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
                        return;
                }
 
-               ret = nvif_object_init(&drm->channel->user, NvNotify0,
-                                      NV_DMA_IN_MEMORY,
+               ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
+                                      NvNotify0, NV_DMA_IN_MEMORY,
                                       &(struct nv_dma_v0) {
                                                .target = NV_DMA_V0_TARGET_VRAM,
                                                .access = NV_DMA_V0_ACCESS_RDWR,
@@ -482,7 +482,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 
        /* Volta requires access to a doorbell register for kickoff. */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
-               ret = nvif_user_init(device);
+               ret = nvif_user_ctor(device, "drmUsermode");
                if (ret)
                        return;
        }
@@ -495,6 +495,40 @@ nouveau_accel_init(struct nouveau_drm *drm)
        nouveau_bo_move_init(drm);
 }
 
+static void __printf(2, 3)
+nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
+{
+       struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
+       struct va_format vaf;
+       va_list va;
+
+       va_start(va, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &va;
+       NV_ERROR(drm, "%pV", &vaf);
+       va_end(va);
+}
+
+static void __printf(2, 3)
+nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
+{
+       struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
+       struct va_format vaf;
+       va_list va;
+
+       va_start(va, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &va;
+       NV_DEBUG(drm, "%pV", &vaf);
+       va_end(va);
+}
+
+static const struct nvif_parent_func
+nouveau_parent = {
+       .debugf = nouveau_drm_debugf,
+       .errorf = nouveau_drm_errorf,
+};
+
 static int
 nouveau_drm_device_init(struct drm_device *dev)
 {
@@ -506,6 +540,9 @@ nouveau_drm_device_init(struct drm_device *dev)
        dev->dev_private = drm;
        drm->dev = dev;
 
+       nvif_parent_ctor(&nouveau_parent, &drm->parent);
+       drm->master.base.object.parent = &drm->parent;
+
        ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
        if (ret)
                goto fail_alloc;
@@ -582,6 +619,7 @@ fail_ttm:
 fail_master:
        nouveau_cli_fini(&drm->master);
 fail_alloc:
+       nvif_parent_dtor(&drm->parent);
        kfree(drm);
        return ret;
 }
@@ -615,6 +653,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
 
        nouveau_cli_fini(&drm->client);
        nouveau_cli_fini(&drm->master);
+       nvif_parent_dtor(&drm->parent);
        kfree(drm);
 }
 
@@ -1026,8 +1065,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
        /* need to bring up power immediately if opening device */
        ret = pm_runtime_get_sync(dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(dev->dev);
                return ret;
+       }
 
        get_task_comm(tmpname, current);
        snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
@@ -1109,8 +1150,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        long ret;
 
        ret = pm_runtime_get_sync(dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put_autosuspend(dev->dev);
                return ret;
+       }
 
        switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
        case DRM_NOUVEAU_NVIF:
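
Note: the new nouveau_drm_errorf()/nouveau_drm_debugf() hooks above give the nvif layer printf-style logging by bundling the format and va_list into a struct va_format and printing it with the kernel's %pV. A userspace sketch of the same varargs-forwarding pattern, using plain vfprintf() since %pV is kernel-only (function name is illustrative):

/* Sketch of the forwarding pattern; the kernel version wraps the arguments
 * in a struct va_format and prints it via %pV instead of vfprintf(). */
#include <stdarg.h>
#include <stdio.h>

static void __attribute__((format(printf, 1, 2)))
demo_errorf(const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	fputs("demo error: ", stderr);
	vfprintf(stderr, fmt, va);    /* forward fmt + args to the real sink */
	va_end(va);
}

int main(void)
{
	demo_errorf("object %s failed with %d\n", "drmDevice", -22);
	return 0;
}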
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 2a65197..ae76a58 100644 (file)
@@ -132,8 +132,10 @@ nouveau_cli(struct drm_file *fpriv)
 }
 
 #include <nvif/object.h>
+#include <nvif/parent.h>
 
 struct nouveau_drm {
+       struct nvif_parent parent;
        struct nouveau_cli master;
        struct nouveau_cli client;
        struct drm_device *dev;
@@ -184,7 +186,6 @@ struct nouveau_drm {
        struct nouveau_channel *channel;
        struct nvkm_gpuobj *notify;
        struct nouveau_fbdev *fbcon;
-       struct nvif_object nvsw;
        struct nvif_object ntfy;
 
        /* nv10-nv40 tiling regions */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f9f5a13..9eb6085 100644 (file)
@@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
        struct nouveau_fbdev *fbcon = info->par;
        struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
        int ret = pm_runtime_get_sync(drm->dev->dev);
-       if (ret < 0 && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_put(drm->dev->dev);
                return ret;
+       }
        return 0;
 }
 
@@ -254,13 +256,13 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
                        fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
                console_unlock();
                nouveau_channel_idle(drm->channel);
-               nvif_object_fini(&fbcon->twod);
-               nvif_object_fini(&fbcon->blit);
-               nvif_object_fini(&fbcon->gdi);
-               nvif_object_fini(&fbcon->patt);
-               nvif_object_fini(&fbcon->rop);
-               nvif_object_fini(&fbcon->clip);
-               nvif_object_fini(&fbcon->surf2d);
+               nvif_object_dtor(&fbcon->twod);
+               nvif_object_dtor(&fbcon->blit);
+               nvif_object_dtor(&fbcon->gdi);
+               nvif_object_dtor(&fbcon->patt);
+               nvif_object_dtor(&fbcon->rop);
+               nvif_object_dtor(&fbcon->clip);
+               nvif_object_dtor(&fbcon->surf2d);
        }
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 666f209..e5dcbf6 100644 (file)
@@ -108,7 +108,7 @@ void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        nouveau_fence_context_kill(fctx, 0);
-       nvif_notify_fini(&fctx->notify);
+       nvif_notify_dtor(&fctx->notify);
        fctx->dead = 1;
 
        /*
@@ -195,7 +195,8 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
        if (!priv->uevent)
                return;
 
-       ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+       ret = nvif_notify_ctor(&chan->user, "fenceNonStallIntr",
+                              nouveau_fence_wait_uevent_handler,
                               false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
                               &(struct nvif_notify_uevent_req) { },
                               sizeof(struct nvif_notify_uevent_req),
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 63b8325..81f111a 100644 (file)
@@ -35,6 +35,7 @@
 #include "nouveau_vmm.h"
 
 #include <nvif/class.h>
+#include <nvif/push206e.h>
 
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -45,8 +46,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (WARN_ON(ret < 0 && ret != -EACCES))
+       if (WARN_ON(ret < 0 && ret != -EACCES)) {
+               pm_runtime_put_autosuspend(dev);
                return;
+       }
 
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -797,7 +800,7 @@ revalidate:
                }
        } else
        if (drm->client.device.info.chipset >= 0x25) {
-               ret = RING_SPACE(chan, req->nr_push * 2);
+               ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
@@ -807,11 +810,11 @@ revalidate:
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
 
-                       OUT_RING(chan, (nvbo->offset + push[i].offset) | 2);
-                       OUT_RING(chan, 0);
+                       PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
+                       PUSH_DATA(chan->chan.push, 0);
                }
        } else {
-               ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+               ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
@@ -841,11 +844,10 @@ revalidate:
                                                push[i].length - 8) / 4, cmd);
                        }
 
-                       OUT_RING(chan, 0x20000000 |
-                                     (nvbo->offset + push[i].offset));
-                       OUT_RING(chan, 0);
+                       PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
+                       PUSH_DATA(chan->chan.push, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
-                               OUT_RING(chan, 0);
+                               PUSH_DATA(chan->chan.push, 0);
                }
        }
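
The pushbuf submission rework above replaces the old channel-based ring macros (RING_SPACE(), OUT_RING(), FIRE_RING()) with the nvif push interface operating on chan->chan.push: PUSH_WAIT() reserves space, PUSH_DATA()/PUSH_CALL()/PUSH_JUMP() emit dwords, PUSH_KICK() flushes. A minimal sketch of the new shape, assuming <nvif/push206e.h> as included above; the wrapper is illustrative only:

    #include <nvif/push206e.h>

    static int example_emit_call(struct nvif_push *push, u64 addr)
    {
            int ret = PUSH_WAIT(push, 2);   /* reserve 2 dwords, was RING_SPACE() */
            if (ret)
                    return ret;

            PUSH_CALL(push, addr);          /* was OUT_RING(chan, addr | 2) */
            PUSH_DATA(push, 0);             /* was OUT_RING(chan, 0) */
            PUSH_KICK(push);                /* flush, was FIRE_RING() */
            return 0;
    }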
 
index c002f89..b1bb542 100644 (file)
@@ -87,7 +87,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
        nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
        nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
        mutex_lock(&mem->cli->drm->master.lock);
-       nvif_mem_fini(&mem->mem);
+       nvif_mem_dtor(&mem->mem);
        mutex_unlock(&mem->cli->drm->master.lock);
 }
 
@@ -121,7 +121,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
 
        mutex_lock(&drm->master.lock);
        cli->base.super = true;
-       ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+       ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
                                 reg->num_pages << PAGE_SHIFT,
                                 &args, sizeof(args), &mem->mem);
        cli->base.super = super;
@@ -144,7 +144,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
        cli->base.super = true;
        switch (cli->mem->oclass) {
        case NVIF_CLASS_MEM_GF100:
-               ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+               ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
                                         drm->ttm.type_vram, page, size,
                                         &(struct gf100_mem_v0) {
                                                .contig = contig,
@@ -152,7 +152,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
                                         &mem->mem);
                break;
        case NVIF_CLASS_MEM_NV50:
-               ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+               ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
                                         drm->ttm.type_vram, page, size,
                                         &(struct nv50_mem_v0) {
                                                .bankswz = mmu->kind[mem->kind] == 2,
index feaac90..c3ccf66 100644 (file)
@@ -46,12 +46,11 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
        return 0;
 }
 
-static int
+static void
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        nouveau_mem_fini(nvbe->mem);
-       return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
@@ -96,12 +95,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
        else
                nvbe->ttm.ttm.func = &nv50_sgdma_backend;
 
-       if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
-               /*
-                * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
-                * and thus our nouveau_sgdma_destroy() hook, so we don't need
-                * to free nvbe here.
-                */
+       if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+               kfree(nvbe);
                return NULL;
+       }
        return &nvbe->ttm.ttm;
 }
index ba9f935..d4b4f86 100644 (file)
@@ -347,7 +347,8 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
-       ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
+       ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
+                           cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
@@ -562,6 +563,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
                .end = notifier->notifier.interval_tree.last + 1,
                .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .hmm_pfns = hmm_pfns,
+               .dev_private_owner = drm->dev,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        int ret;
@@ -903,8 +905,8 @@ nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
 
        nouveau_svm_fault_buffer_fini(svm, id);
 
-       nvif_notify_fini(&buffer->notify);
-       nvif_object_fini(&buffer->object);
+       nvif_notify_dtor(&buffer->notify);
+       nvif_object_dtor(&buffer->object);
 }
 
 static int
@@ -918,8 +920,8 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
 
        buffer->id = id;
 
-       ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
-                              &buffer->object);
+       ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
+                              sizeof(args), &buffer->object);
        if (ret < 0) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
@@ -930,8 +932,8 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;
 
-       ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
-                              NVB069_V0_NTFY_FAULT, NULL, 0, 0,
+       ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
+                              true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);
        if (ret)
                return ret;
index b28c7dc..a49e881 100644 (file)
@@ -121,15 +121,15 @@ void
 nouveau_vmm_fini(struct nouveau_vmm *vmm)
 {
        nouveau_svmm_fini(&vmm->svmm);
-       nvif_vmm_fini(&vmm->vmm);
+       nvif_vmm_dtor(&vmm->vmm);
        vmm->cli = NULL;
 }
 
 int
 nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
 {
-       int ret = nvif_vmm_init(&cli->mmu, oclass, false, PAGE_SIZE, 0, NULL, 0,
-                               &vmm->vmm);
+       int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
+                               0, NULL, 0, &vmm->vmm);
        if (ret)
                return ret;
 
index 01731db..92f3fb6 100644 (file)
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
+#define NVIF_DEBUG_PRINT_DISABLE
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
+#include <nvif/push006c.h>
+
 int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret;
 
-       ret = RING_SPACE(chan, 4);
+       ret = PUSH_WAIT(push, 4);
        if (ret)
                return ret;
 
-       BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
-       OUT_RING(chan, (region->sy << 16) | region->sx);
-       OUT_RING(chan, (region->dy << 16) | region->dx);
-       OUT_RING(chan, (region->height << 16) | region->width);
-       FIRE_RING(chan);
+       PUSH_NVSQ(push, NV05F, 0x0300, (region->sy << 16) | region->sx,
+                              0x0304, (region->dy << 16) | region->dx,
+                              0x0308, (region->height << 16) | region->width);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -52,24 +54,22 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret;
 
-       ret = RING_SPACE(chan, 7);
+       ret = PUSH_WAIT(push, 7);
        if (ret)
                return ret;
 
-       BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
-       OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
+       PUSH_NVSQ(push, NV04A, 0x02fc, (rect->rop != ROP_COPY) ? 1 : 3);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR)
-               OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+               PUSH_NVSQ(push, NV04A, 0x03fc, ((uint32_t *)info->pseudo_palette)[rect->color]);
        else
-               OUT_RING(chan, rect->color);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
-       OUT_RING(chan, (rect->dx << 16) | rect->dy);
-       OUT_RING(chan, (rect->width << 16) | rect->height);
-       FIRE_RING(chan);
+               PUSH_NVSQ(push, NV04A, 0x03fc, rect->color);
+       PUSH_NVSQ(push, NV04A, 0x0400, (rect->dx << 16) | rect->dy,
+                              0x0404, (rect->width << 16) | rect->height);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -79,6 +79,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        uint32_t fg;
        uint32_t bg;
        uint32_t dsize;
@@ -88,7 +89,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (image->depth != 1)
                return -ENODEV;
 
-       ret = RING_SPACE(chan, 8);
+       ret = PUSH_WAIT(push, 8);
        if (ret)
                return ret;
 
@@ -101,31 +102,29 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
                bg = image->bg_color;
        }
 
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
-       OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
-       OUT_RING(chan, ((image->dy + image->height) << 16) |
-                        ((image->dx + image->width) & 0xffff));
-       OUT_RING(chan, bg);
-       OUT_RING(chan, fg);
-       OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
-       OUT_RING(chan, (image->height << 16) | image->width);
-       OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+       PUSH_NVSQ(push, NV04A, 0x0be4, (image->dy << 16) | (image->dx & 0xffff),
+                              0x0be8, ((image->dy + image->height) << 16) |
+                                      ((image->dx + image->width) & 0xffff),
+                              0x0bec, bg,
+                              0x0bf0, fg,
+                              0x0bf4, (image->height << 16) | ALIGN(image->width, 8),
+                              0x0bf8, (image->height << 16) | image->width,
+                              0x0bfc, (image->dy << 16) | (image->dx & 0xffff));
 
        dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
        while (dsize) {
                int iter_len = dsize > 128 ? 128 : dsize;
 
-               ret = RING_SPACE(chan, iter_len + 1);
+               ret = PUSH_WAIT(push, iter_len + 1);
                if (ret)
                        return ret;
 
-               BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
-               OUT_RINGp(chan, data, iter_len);
+               PUSH_NVSQ(push, NV04A, 0x0c00, data, iter_len);
                data += iter_len;
                dsize -= iter_len;
        }
 
-       FIRE_RING(chan);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -137,6 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_channel *chan = drm->channel;
        struct nvif_device *device = &drm->client.device;
+       struct nvif_push *push = chan->chan.push;
        int surface_fmt, pattern_fmt, rect_fmt;
        int ret;
 
@@ -168,110 +168,90 @@ nv04_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }
 
-       ret = nvif_object_init(&chan->user, 0x0062,
+       ret = nvif_object_ctor(&chan->user, "fbconCtxSurf2d", 0x0062,
                               device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
                               0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
-                              &nfbdev->clip);
+       ret = nvif_object_ctor(&chan->user, "fbconCtxClip", 0x0019, 0x0019,
+                              NULL, 0, &nfbdev->clip);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
-                              &nfbdev->rop);
+       ret = nvif_object_ctor(&chan->user, "fbconCtxRop", 0x0043, 0x0043,
+                              NULL, 0, &nfbdev->rop);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
-                              &nfbdev->patt);
+       ret = nvif_object_ctor(&chan->user, "fbconCtxPatt", 0x0044, 0x0044,
+                              NULL, 0, &nfbdev->patt);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
-                              &nfbdev->gdi);
+       ret = nvif_object_ctor(&chan->user, "fbconGdiRectText", 0x004a, 0x004a,
+                              NULL, 0, &nfbdev->gdi);
        if (ret)
                return ret;
 
-       ret = nvif_object_init(&chan->user, 0x005f,
+       ret = nvif_object_ctor(&chan->user, "fbconImageBlit", 0x005f,
                               device->info.chipset >= 0x11 ? 0x009f : 0x005f,
                               NULL, 0, &nfbdev->blit);
        if (ret)
                return ret;
 
-       if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
+       if (PUSH_WAIT(push, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
                nouveau_fbcon_gpu_lockup(info);
                return 0;
        }
 
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
-       OUT_RING(chan, nfbdev->surf2d.handle);
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
-       OUT_RING(chan, chan->vram.handle);
-       OUT_RING(chan, chan->vram.handle);
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
-       OUT_RING(chan, surface_fmt);
-       OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
-       OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
-       OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
-
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
-       OUT_RING(chan, nfbdev->rop.handle);
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
-       OUT_RING(chan, 0x55);
-
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
-       OUT_RING(chan, nfbdev->patt.handle);
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
-       OUT_RING(chan, pattern_fmt);
+       PUSH_NVSQ(push, NV042, 0x0000, nfbdev->surf2d.handle);
+       PUSH_NVSQ(push, NV042, 0x0184, chan->vram.handle,
+                              0x0188, chan->vram.handle);
+       PUSH_NVSQ(push, NV042, 0x0300, surface_fmt,
+                              0x0304, info->fix.line_length | (info->fix.line_length << 16),
+                              0x0308, info->fix.smem_start - dev->mode_config.fb_base,
+                              0x030c, info->fix.smem_start - dev->mode_config.fb_base);
+
+       PUSH_NVSQ(push, NV043, 0x0000, nfbdev->rop.handle);
+       PUSH_NVSQ(push, NV043, 0x0300, 0x55);
+
+       PUSH_NVSQ(push, NV044, 0x0000, nfbdev->patt.handle);
+       PUSH_NVSQ(push, NV044, 0x0300, pattern_fmt,
 #ifdef __BIG_ENDIAN
-       OUT_RING(chan, 2);
+                              0x0304, 2,
 #else
-       OUT_RING(chan, 1);
+                              0x0304, 1,
 #endif
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       OUT_RING(chan, ~0);
-       OUT_RING(chan, ~0);
-       OUT_RING(chan, ~0);
-       OUT_RING(chan, ~0);
-
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
-       OUT_RING(chan, nfbdev->clip.handle);
-       BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
-
-       BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
-       OUT_RING(chan, nfbdev->blit.handle);
-       BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
-       OUT_RING(chan, nfbdev->surf2d.handle);
-       BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
-       OUT_RING(chan, 3);
-       if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
-               BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
-               OUT_RING(chan, 0);
-               OUT_RING(chan, 1);
-               OUT_RING(chan, 2);
+                              0x0308, 0,
+                              0x030c, 1,
+                              0x0310, ~0,
+                              0x0314, ~0,
+                              0x0318, ~0,
+                              0x031c, ~0);
+
+       PUSH_NVSQ(push, NV019, 0x0000, nfbdev->clip.handle);
+       PUSH_NVSQ(push, NV019, 0x0300, 0,
+                              0x0304, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+       PUSH_NVSQ(push, NV05F, 0x0000, nfbdev->blit.handle);
+       PUSH_NVSQ(push, NV05F, 0x019c, nfbdev->surf2d.handle);
+       PUSH_NVSQ(push, NV05F, 0x02fc, 3);
+       if (nfbdev->blit.oclass == 0x009f) {
+               PUSH_NVSQ(push, NV09F, 0x0120, 0,
+                                      0x0124, 1,
+                                      0x0128, 2);
        }
 
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
-       OUT_RING(chan, nfbdev->gdi.handle);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
-       OUT_RING(chan, nfbdev->surf2d.handle);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
-       OUT_RING(chan, nfbdev->patt.handle);
-       OUT_RING(chan, nfbdev->rop.handle);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
-       OUT_RING(chan, rect_fmt);
-       BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
-       OUT_RING(chan, 3);
-
-       FIRE_RING(chan);
+       PUSH_NVSQ(push, NV04A, 0x0000, nfbdev->gdi.handle);
+       PUSH_NVSQ(push, NV04A, 0x0198, nfbdev->surf2d.handle);
+       PUSH_NVSQ(push, NV04A, 0x0188, nfbdev->patt.handle,
+                              0x018c, nfbdev->rop.handle);
+       PUSH_NVSQ(push, NV04A, 0x0304, 1);
+       PUSH_NVSQ(push, NV04A, 0x0300, rect_fmt);
+       PUSH_NVSQ(push, NV04A, 0x02fc, 3);
 
+       PUSH_KICK(push);
        return 0;
 }
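
On the NV04-era paths the conversion keeps raw method offsets but folds each BEGIN_NV04() header plus its OUT_RING() run into one PUSH_NVSQ() call taking an object token (NV042, NV04A, NV05F, ...) and a list of offset/value pairs. A small sketch of that shape, lifted from the copyarea hunk above and assuming the same includes as nv04_fbcon.c; the wrapper and parameter names are illustrative:

    #include "nouveau_dma.h"                /* as included by nv04_fbcon.c above */
    #include <nvif/push006c.h>

    static int example_nv04_blit(struct nvif_push *push,
                                 u32 src_xy, u32 dst_xy, u32 wh)
    {
            int ret = PUSH_WAIT(push, 4);   /* one header + three data dwords */
            if (ret)
                    return ret;

            PUSH_NVSQ(push, NV05F, 0x0300, src_xy,
                                   0x0304, dst_xy,
                                   0x0308, wh);
            PUSH_KICK(push);
            return 0;
    }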
 
index c41e82b..5b71a5a 100644 (file)
  *
  * Authors: Ben Skeggs
  */
-
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
 #include <nvif/if0004.h>
+#include <nvif/push006c.h>
 
 struct nv04_fence_chan {
        struct nouveau_fence_chan base;
@@ -39,12 +39,11 @@ struct nv04_fence_priv {
 static int
 nv04_fence_emit(struct nouveau_fence *fence)
 {
-       struct nouveau_channel *chan = fence->channel;
-       int ret = RING_SPACE(chan, 2);
+       struct nvif_push *push = fence->channel->chan.push;
+       int ret = PUSH_WAIT(push, 2);
        if (ret == 0) {
-               BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
-               OUT_RING  (chan, fence->base.seqno);
-               FIRE_RING (chan);
+               PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
+               PUSH_KICK(push);
        }
        return ret;
 }
index 4476b71..c6a0db5 100644 (file)
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nv10_fence.h"
 
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl006e.h>
+
 int
 nv10_fence_emit(struct nouveau_fence *fence)
 {
-       struct nouveau_channel *chan = fence->channel;
-       int ret = RING_SPACE(chan, 2);
+       struct nvif_push *push = fence->channel->chan.push;
+       int ret = PUSH_WAIT(push, 2);
        if (ret == 0) {
-               BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-               OUT_RING  (chan, fence->base.seqno);
-               FIRE_RING (chan);
+               PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
+               PUSH_KICK(push);
        }
        return ret;
 }
@@ -50,7 +52,7 @@ nv10_fence_sync(struct nouveau_fence *fence,
 u32
 nv10_fence_read(struct nouveau_channel *chan)
 {
-       return nvif_rd32(&chan->user, 0x0048);
+       return NVIF_RD32(&chan->user, NV06E, REFERENCE);
 }
 
 void
@@ -58,7 +60,7 @@ nv10_fence_context_del(struct nouveau_channel *chan)
 {
        struct nv10_fence_chan *fctx = chan->fence;
        nouveau_fence_context_del(&fctx->base);
-       nvif_object_fini(&fctx->sema);
+       nvif_object_dtor(&fctx->sema);
        chan->fence = NULL;
        nouveau_fence_context_free(&fctx->base);
 }
index 5d613d4..cd1e87a 100644 (file)
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+#include <nvif/push006c.h>
 
-#include <nvif/os.h>
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include "nv10_fence.h"
+#include <nvhw/class/cl176e.h>
 
 int
 nv17_fence_sync(struct nouveau_fence *fence,
@@ -37,6 +39,8 @@ nv17_fence_sync(struct nouveau_fence *fence,
        struct nouveau_cli *cli = (void *)prev->user.client;
        struct nv10_fence_priv *priv = chan->drm->fence;
        struct nv10_fence_chan *fctx = chan->fence;
+       struct nvif_push *ppush = prev->chan.push;
+       struct nvif_push *npush = chan->chan.push;
        u32 value;
        int ret;
 
@@ -48,23 +52,21 @@ nv17_fence_sync(struct nouveau_fence *fence,
        priv->sequence += 2;
        spin_unlock(&priv->lock);
 
-       ret = RING_SPACE(prev, 5);
+       ret = PUSH_WAIT(ppush, 5);
        if (!ret) {
-               BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
-               OUT_RING  (prev, fctx->sema.handle);
-               OUT_RING  (prev, 0);
-               OUT_RING  (prev, value + 0);
-               OUT_RING  (prev, value + 1);
-               FIRE_RING (prev);
+               PUSH_MTHD(ppush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
+                                        SEMAPHORE_OFFSET, 0,
+                                        SEMAPHORE_ACQUIRE, value + 0,
+                                        SEMAPHORE_RELEASE, value + 1);
+               PUSH_KICK(ppush);
        }
 
-       if (!ret && !(ret = RING_SPACE(chan, 5))) {
-               BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
-               OUT_RING  (chan, fctx->sema.handle);
-               OUT_RING  (chan, 0);
-               OUT_RING  (chan, value + 1);
-               OUT_RING  (chan, value + 2);
-               FIRE_RING (chan);
+       if (!ret && !(ret = PUSH_WAIT(npush, 5))) {
+               PUSH_MTHD(npush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
+                                        SEMAPHORE_OFFSET, 0,
+                                        SEMAPHORE_ACQUIRE, value + 1,
+                                        SEMAPHORE_RELEASE, value + 2);
+               PUSH_KICK(npush);
        }
 
        mutex_unlock(&cli->mutex);
@@ -90,7 +92,8 @@ nv17_fence_context_new(struct nouveau_channel *chan)
        fctx->base.read = nv10_fence_read;
        fctx->base.sync = nv17_fence_sync;
 
-       ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
+       ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
+                              NV_DMA_FROM_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
index 47428f7..71f92e4 100644 (file)
  *
  * Authors: Ben Skeggs
  */
-
+#define NVIF_DEBUG_PRINT_DISABLE
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_vmm.h"
 
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl502d.h>
+
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
+       u32 colour;
        int ret;
 
-       ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+               colour = ((uint32_t *)info->pseudo_palette)[rect->color];
+       else
+               colour = rect->color;
+
+       ret = PUSH_WAIT(push, rect->rop == ROP_COPY ? 7 : 11);
        if (ret)
                return ret;
 
        if (rect->rop != ROP_COPY) {
-               BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
-               OUT_RING(chan, 1);
+               PUSH_MTHD(push, NV502D, SET_OPERATION,
+                         NVDEF(NV502D, SET_OPERATION, V, ROP_AND));
        }
-       BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
-       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
-               OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
-       else
-               OUT_RING(chan, rect->color);
-       BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
-       OUT_RING(chan, rect->dx);
-       OUT_RING(chan, rect->dy);
-       OUT_RING(chan, rect->dx + rect->width);
-       OUT_RING(chan, rect->dy + rect->height);
+
+       PUSH_MTHD(push, NV502D, SET_RENDER_SOLID_PRIM_COLOR, colour);
+
+       PUSH_MTHD(push, NV502D, RENDER_SOLID_PRIM_POINT_SET_X(0), rect->dx,
+                               RENDER_SOLID_PRIM_POINT_Y(0), rect->dy,
+                               RENDER_SOLID_PRIM_POINT_SET_X(1), rect->dx + rect->width,
+                               RENDER_SOLID_PRIM_POINT_Y(1), rect->dy + rect->height);
+
        if (rect->rop != ROP_COPY) {
-               BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
-               OUT_RING(chan, 3);
+               PUSH_MTHD(push, NV502D, SET_OPERATION,
+                         NVDEF(NV502D, SET_OPERATION, V, SRCCOPY));
        }
-       FIRE_RING(chan);
+
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -68,25 +78,25 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret;
 
-       ret = RING_SPACE(chan, 12);
+       ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;
 
-       BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
-       OUT_RING(chan, 0);
-       BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
-       OUT_RING(chan, region->dx);
-       OUT_RING(chan, region->dy);
-       OUT_RING(chan, region->width);
-       OUT_RING(chan, region->height);
-       BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, region->sx);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, region->sy);
-       FIRE_RING(chan);
+       PUSH_MTHD(push, NV502D, WAIT_FOR_IDLE, 0);
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_DST_X0, region->dx,
+                               SET_PIXELS_FROM_MEMORY_DST_Y0, region->dy,
+                               SET_PIXELS_FROM_MEMORY_DST_WIDTH, region->width,
+                               SET_PIXELS_FROM_MEMORY_DST_HEIGHT, region->height);
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_SRC_X0_INT, region->sx,
+                               SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC, 0,
+                               PIXELS_FROM_MEMORY_SRC_Y0_INT, region->sy);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -96,52 +106,54 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
-       uint32_t *palette = info->pseudo_palette;
+       uint32_t *palette = info->pseudo_palette, bg, fg;
        int ret;
 
        if (image->depth != 1)
                return -ENODEV;
 
-       ret = RING_SPACE(chan, 11);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-               OUT_RING(chan, palette[image->bg_color] | mask);
-               OUT_RING(chan, palette[image->fg_color] | mask);
+               bg = palette[image->bg_color] | mask;
+               fg = palette[image->fg_color] | mask;
        } else {
-               OUT_RING(chan, image->bg_color);
-               OUT_RING(chan, image->fg_color);
+               bg = image->bg_color;
+               fg = image->fg_color;
        }
-       BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
-       OUT_RING(chan, image->width);
-       OUT_RING(chan, image->height);
-       BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, image->dx);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, image->dy);
+
+       ret = PUSH_WAIT(push, 11);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_COLOR0, bg,
+                               SET_PIXELS_FROM_CPU_COLOR1, fg);
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_SRC_WIDTH, image->width,
+                               SET_PIXELS_FROM_CPU_SRC_HEIGHT, image->height);
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DST_X0_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DST_X0_INT, image->dx,
+                               SET_PIXELS_FROM_CPU_DST_Y0_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DST_Y0_INT, image->dy);
 
        dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
        while (dwords) {
-               int push = dwords > 2047 ? 2047 : dwords;
+               int count = dwords > 2047 ? 2047 : dwords;
 
-               ret = RING_SPACE(chan, push + 1);
+               ret = PUSH_WAIT(push, count + 1);
                if (ret)
                        return ret;
 
-               dwords -= push;
+               dwords -= count;
 
-               BEGIN_NI04(chan, NvSub2D, 0x0860, push);
-               OUT_RINGp(chan, data, push);
-               data += push;
+               PUSH_NINC(push, NV502D, PIXELS_FROM_CPU_DATA, data, count);
+               data += count;
        }
 
-       FIRE_RING(chan);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -152,26 +164,27 @@ nv50_fbcon_accel_init(struct fb_info *info)
        struct drm_device *dev = nfbdev->helper.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret, format;
 
        switch (info->var.bits_per_pixel) {
        case 8:
-               format = 0xf3;
+               format = NV502D_SET_DST_FORMAT_V_Y8;
                break;
        case 15:
-               format = 0xf8;
+               format = NV502D_SET_DST_FORMAT_V_X1R5G5B5;
                break;
        case 16:
-               format = 0xe8;
+               format = NV502D_SET_DST_FORMAT_V_R5G6B5;
                break;
        case 32:
                switch (info->var.transp.length) {
                case 0: /* depth 24 */
                case 8: /* depth 32, just use 24.. */
-                       format = 0xe6;
+                       format = NV502D_SET_DST_FORMAT_V_X8R8G8B8;
                        break;
                case 2: /* depth 30 */
-                       format = 0xd1;
+                       format = NV502D_SET_DST_FORMAT_V_A2B10G10R10;
                        break;
                default:
                        return -EINVAL;
@@ -181,77 +194,106 @@ nv50_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }
 
-       ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
-                              &nfbdev->twod);
+       ret = nvif_object_ctor(&chan->user, "fbconTwoD", 0x502d, 0x502d,
+                              NULL, 0, &nfbdev->twod);
        if (ret)
                return ret;
 
-       ret = RING_SPACE(chan, 58);
+       ret = PUSH_WAIT(push, 56);
        if (ret) {
                nouveau_fbcon_gpu_lockup(info);
                return ret;
        }
 
-       BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
-       OUT_RING(chan, nfbdev->twod.handle);
-       BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
-       OUT_RING(chan, chan->vram.handle);
-       OUT_RING(chan, chan->vram.handle);
-       OUT_RING(chan, chan->vram.handle);
-       BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
-       OUT_RING(chan, 0);
-       BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
-       OUT_RING(chan, 3);
-       BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
-       OUT_RING(chan, 0x55);
-       BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
-       OUT_RING(chan, 4);
-       OUT_RING(chan, format);
-       BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
-       OUT_RING(chan, 2);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
-       OUT_RING(chan, format);
-       BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       OUT_RING(chan, 0);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
-       OUT_RING(chan, format);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
-       OUT_RING(chan, info->fix.line_length);
-       OUT_RING(chan, info->var.xres_virtual);
-       OUT_RING(chan, info->var.yres_virtual);
-       OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
-       OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
-       BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
-       OUT_RING(chan, format);
-       OUT_RING(chan, 1);
-       BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
-       OUT_RING(chan, info->fix.line_length);
-       OUT_RING(chan, info->var.xres_virtual);
-       OUT_RING(chan, info->var.yres_virtual);
-       OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
-       OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
-       FIRE_RING(chan);
+       PUSH_MTHD(push, NV502D, SET_OBJECT, nfbdev->twod.handle);
+       PUSH_MTHD(push, NV502D, SET_DST_CONTEXT_DMA, chan->vram.handle,
+                               SET_SRC_CONTEXT_DMA, chan->vram.handle,
+                               SET_SEMAPHORE_CONTEXT_DMA, chan->vram.handle);
+
+       PUSH_MTHD(push, NV502D, SET_DST_FORMAT,
+                 NVVAL(NV502D, SET_DST_FORMAT, V, format),
+
+                               SET_DST_MEMORY_LAYOUT,
+                 NVDEF(NV502D, SET_DST_MEMORY_LAYOUT, V, PITCH));
+
+       PUSH_MTHD(push, NV502D, SET_DST_PITCH, info->fix.line_length,
+                               SET_DST_WIDTH, info->var.xres_virtual,
+                               SET_DST_HEIGHT, info->var.yres_virtual,
+
+                               SET_DST_OFFSET_UPPER,
+                 NVVAL(NV502D, SET_DST_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+                               SET_DST_OFFSET_LOWER,
+                 NVVAL(NV502D, SET_DST_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+       PUSH_MTHD(push, NV502D, SET_SRC_FORMAT,
+                 NVVAL(NV502D, SET_SRC_FORMAT, V, format),
+
+                               SET_SRC_MEMORY_LAYOUT,
+                 NVDEF(NV502D, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+
+       PUSH_MTHD(push, NV502D, SET_SRC_PITCH, info->fix.line_length,
+                               SET_SRC_WIDTH, info->var.xres_virtual,
+                               SET_SRC_HEIGHT, info->var.yres_virtual,
+
+                               SET_SRC_OFFSET_UPPER,
+                 NVVAL(NV502D, SET_SRC_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+                               SET_SRC_OFFSET_LOWER,
+                 NVVAL(NV502D, SET_SRC_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+       PUSH_MTHD(push, NV502D, SET_CLIP_ENABLE,
+                 NVDEF(NV502D, SET_CLIP_ENABLE, V, FALSE));
+
+       PUSH_MTHD(push, NV502D, SET_ROP,
+                 NVVAL(NV502D, SET_ROP, V, 0x55));
+
+       PUSH_MTHD(push, NV502D, SET_OPERATION,
+                 NVDEF(NV502D, SET_OPERATION, V, SRCCOPY));
+
+       PUSH_MTHD(push, NV502D, SET_MONOCHROME_PATTERN_COLOR_FORMAT,
+                 NVDEF(NV502D, SET_MONOCHROME_PATTERN_COLOR_FORMAT, V, A8R8G8B8),
+
+                               SET_MONOCHROME_PATTERN_FORMAT,
+                 NVDEF(NV502D, SET_MONOCHROME_PATTERN_FORMAT, V, LE_M1));
+
+       PUSH_MTHD(push, NV502D, RENDER_SOLID_PRIM_MODE,
+                 NVDEF(NV502D, RENDER_SOLID_PRIM_MODE, V, RECTS),
+
+                               SET_RENDER_SOLID_PRIM_COLOR_FORMAT,
+                 NVVAL(NV502D, SET_RENDER_SOLID_PRIM_COLOR_FORMAT, V, format));
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DATA_TYPE,
+                 NVDEF(NV502D, SET_PIXELS_FROM_CPU_DATA_TYPE, V, INDEX),
+
+                               SET_PIXELS_FROM_CPU_COLOR_FORMAT,
+                 NVVAL(NV502D, SET_PIXELS_FROM_CPU_COLOR_FORMAT, V, format),
+
+                               SET_PIXELS_FROM_CPU_INDEX_FORMAT,
+                 NVDEF(NV502D, SET_PIXELS_FROM_CPU_INDEX_FORMAT, V, I1),
+
+                               SET_PIXELS_FROM_CPU_MONO_FORMAT,
+                 NVDEF(NV502D, SET_PIXELS_FROM_CPU_MONO_FORMAT, V, CGA6_M1),
+
+                               SET_PIXELS_FROM_CPU_WRAP,
+                 NVDEF(NV502D, SET_PIXELS_FROM_CPU_WRAP, V, WRAP_BYTE));
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_MONO_OPACITY,
+                 NVDEF(NV502D, SET_PIXELS_FROM_CPU_MONO_OPACITY, V, OPAQUE));
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DX_DU_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DX_DU_INT, 1,
+                               SET_PIXELS_FROM_CPU_DY_DV_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DY_DV_INT, 1);
+
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP,
+                 NVDEF(NV502D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP, V, TRUE));
 
+       PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_DU_DX_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_DU_DX_INT, 1,
+                               SET_PIXELS_FROM_MEMORY_DV_DY_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_DV_DY_INT, 1);
+       PUSH_KICK(push);
        return 0;
 }
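
On the nv50/nvc0 paths the magic numbers go away entirely: PUSH_MTHD()/PUSH_IMMD() take method names from the generated <nvhw/class/*.h> headers, and NVDEF()/NVVAL() build the field values, so the old BEGIN_NV04(chan, NvSub2D, 0x02ac, 1); OUT_RING(chan, 3); pair becomes a named SET_OPERATION = SRCCOPY write. A minimal sketch under the cl502d.h definitions referenced above, assuming the same includes as nv50_fbcon.c; the wrapper name is illustrative:

    #include "nouveau_dma.h"                /* as included by nv50_fbcon.c above */
    #include <nvif/push206e.h>

    #include <nvhw/class/cl502d.h>

    static int example_set_srccopy(struct nvif_push *push)
    {
            int ret = PUSH_WAIT(push, 2);   /* header + one data dword */
            if (ret)
                    return ret;

            PUSH_MTHD(push, NV502D, SET_OPERATION,
                      NVDEF(NV502D, SET_OPERATION, V, SRCCOPY));
            PUSH_KICK(push);
            return 0;
    }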
 
index a00ecc3..ebb7406 100644 (file)
@@ -51,7 +51,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
        fctx->base.read = nv10_fence_read;
        fctx->base.sync = nv17_fence_sync;
 
-       ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
+       ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
+                              NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
index f07da00..7ed36b3 100644 (file)
@@ -21,7 +21,6 @@
  *
  * Authors: Ben Skeggs
  */
-
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
 #include "nv50_display.h"
 
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl826f.h>
+
 static int
 nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 {
-       int ret = RING_SPACE(chan, 8);
+       struct nvif_push *push = chan->chan.push;
+       int ret = PUSH_WAIT(push, 8);
        if (ret == 0) {
-               BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-               OUT_RING  (chan, chan->vram.handle);
-               BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
-               OUT_RING  (chan, upper_32_bits(virtual));
-               OUT_RING  (chan, lower_32_bits(virtual));
-               OUT_RING  (chan, sequence);
-               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
-               OUT_RING  (chan, 0x00000000);
-               FIRE_RING (chan);
+               PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
+
+               PUSH_MTHD(push, NV826F, SEMAPHOREA,
+                         NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+                                       SEMAPHOREB, lower_32_bits(virtual),
+                                       SEMAPHOREC, sequence,
+
+                                       SEMAPHORED,
+                         NVDEF(NV826F, SEMAPHORED, OPERATION, RELEASE),
+
+                                       NON_STALLED_INTERRUPT, 0);
+               PUSH_KICK(push);
        }
        return ret;
 }
@@ -50,16 +58,20 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 static int
 nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 {
-       int ret = RING_SPACE(chan, 7);
+       struct nvif_push *push = chan->chan.push;
+       int ret = PUSH_WAIT(push, 7);
        if (ret == 0) {
-               BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-               OUT_RING  (chan, chan->vram.handle);
-               BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(virtual));
-               OUT_RING  (chan, lower_32_bits(virtual));
-               OUT_RING  (chan, sequence);
-               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
-               FIRE_RING (chan);
+               PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
+
+               PUSH_MTHD(push, NV826F, SEMAPHOREA,
+                         NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+                                       SEMAPHOREB, lower_32_bits(virtual),
+                                       SEMAPHOREC, sequence,
+
+                                       SEMAPHORED,
+                         NVDEF(NV826F, SEMAPHORED, OPERATION, ACQ_GEQ));
+               PUSH_KICK(push);
        }
        return ret;
 }
index cb56163..7908a1a 100644 (file)
  *
  * Authors: Ben Skeggs
  */
-
+#define NVIF_DEBUG_PRINT_DISABLE
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_vmm.h"
 
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl902d.h>
+
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
+       u32 colour;
        int ret;
 
-       ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+               colour = ((uint32_t *)info->pseudo_palette)[rect->color];
+       else
+               colour = rect->color;
+
+       ret = PUSH_WAIT(push, rect->rop == ROP_COPY ? 7 : 9);
        if (ret)
                return ret;
 
        if (rect->rop != ROP_COPY) {
-               BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
-               OUT_RING  (chan, 1);
+               PUSH_IMMD(push, NV902D, SET_OPERATION,
+                         NVDEF(NV902D, SET_OPERATION, V, ROP_AND));
        }
-       BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
-       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-           info->fix.visual == FB_VISUAL_DIRECTCOLOR)
-               OUT_RING  (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
-       else
-               OUT_RING  (chan, rect->color);
-       BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
-       OUT_RING  (chan, rect->dx);
-       OUT_RING  (chan, rect->dy);
-       OUT_RING  (chan, rect->dx + rect->width);
-       OUT_RING  (chan, rect->dy + rect->height);
+
+       PUSH_MTHD(push, NV902D, SET_RENDER_SOLID_PRIM_COLOR, colour);
+
+       PUSH_MTHD(push, NV902D, RENDER_SOLID_PRIM_POINT_SET_X(0), rect->dx,
+                               RENDER_SOLID_PRIM_POINT_Y(0), rect->dy,
+                               RENDER_SOLID_PRIM_POINT_SET_X(1), rect->dx + rect->width,
+                               RENDER_SOLID_PRIM_POINT_Y(1), rect->dy + rect->height);
+
        if (rect->rop != ROP_COPY) {
-               BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
-               OUT_RING  (chan, 3);
+               PUSH_IMMD(push, NV902D, SET_OPERATION,
+                         NVDEF(NV902D, SET_OPERATION, V, SRCCOPY));
        }
-       FIRE_RING(chan);
+
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -68,25 +78,25 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret;
 
-       ret = RING_SPACE(chan, 12);
+       ret = PUSH_WAIT(push, 11);
        if (ret)
                return ret;
 
-       BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
-       OUT_RING  (chan, 0);
-       BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
-       OUT_RING  (chan, region->dx);
-       OUT_RING  (chan, region->dy);
-       OUT_RING  (chan, region->width);
-       OUT_RING  (chan, region->height);
-       BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, region->sx);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, region->sy);
-       FIRE_RING(chan);
+       PUSH_IMMD(push, NV902D, WAIT_FOR_IDLE, 0);
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_DST_X0, region->dx,
+                               SET_PIXELS_FROM_MEMORY_DST_Y0, region->dy,
+                               SET_PIXELS_FROM_MEMORY_DST_WIDTH, region->width,
+                               SET_PIXELS_FROM_MEMORY_DST_HEIGHT, region->height);
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_SRC_X0_INT, region->sx,
+                               SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC, 0,
+                               PIXELS_FROM_MEMORY_SRC_Y0_INT, region->sy);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -96,52 +106,54 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
-       uint32_t *palette = info->pseudo_palette;
+       uint32_t *palette = info->pseudo_palette, bg, fg;
        int ret;
 
        if (image->depth != 1)
                return -ENODEV;
 
-       ret = RING_SPACE(chan, 11);
-       if (ret)
-               return ret;
-
-       BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-               OUT_RING  (chan, palette[image->bg_color] | mask);
-               OUT_RING  (chan, palette[image->fg_color] | mask);
+               bg = palette[image->bg_color] | mask;
+               fg = palette[image->fg_color] | mask;
        } else {
-               OUT_RING  (chan, image->bg_color);
-               OUT_RING  (chan, image->fg_color);
+               bg = image->bg_color;
+               fg = image->fg_color;
        }
-       BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
-       OUT_RING  (chan, image->width);
-       OUT_RING  (chan, image->height);
-       BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, image->dx);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, image->dy);
+
+       ret = PUSH_WAIT(push, 11);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_COLOR0, bg,
+                               SET_PIXELS_FROM_CPU_COLOR1, fg);
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_SRC_WIDTH, image->width,
+                               SET_PIXELS_FROM_CPU_SRC_HEIGHT, image->height);
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DST_X0_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DST_X0_INT, image->dx,
+                               SET_PIXELS_FROM_CPU_DST_Y0_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DST_Y0_INT, image->dy);
 
        dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
        while (dwords) {
-               int push = dwords > 2047 ? 2047 : dwords;
+               int count = dwords > 2047 ? 2047 : dwords;
 
-               ret = RING_SPACE(chan, push + 1);
+               ret = PUSH_WAIT(push, count + 1);
                if (ret)
                        return ret;
 
-               dwords -= push;
+               dwords -= count;
 
-               BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
-               OUT_RINGp(chan, data, push);
-               data += push;
+               PUSH_NINC(push, NV902D, PIXELS_FROM_CPU_DATA, data, count);
+               data += count;
        }
 
-       FIRE_RING(chan);
+       PUSH_KICK(push);
        return 0;
 }
 
@@ -152,31 +164,32 @@ nvc0_fbcon_accel_init(struct fb_info *info)
        struct drm_device *dev = nfbdev->helper.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_channel *chan = drm->channel;
+       struct nvif_push *push = chan->chan.push;
        int ret, format;
 
-       ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
-                              &nfbdev->twod);
+       ret = nvif_object_ctor(&chan->user, "fbconTwoD", 0x902d, 0x902d,
+                              NULL, 0, &nfbdev->twod);
        if (ret)
                return ret;
 
        switch (info->var.bits_per_pixel) {
        case 8:
-               format = 0xf3;
+               format = NV902D_SET_DST_FORMAT_V_Y8;
                break;
        case 15:
-               format = 0xf8;
+               format = NV902D_SET_DST_FORMAT_V_X1R5G5B5;
                break;
        case 16:
-               format = 0xe8;
+               format = NV902D_SET_DST_FORMAT_V_R5G6B5;
                break;
        case 32:
                switch (info->var.transp.length) {
                case 0: /* depth 24 */
                case 8: /* depth 32, just use 24.. */
-                       format = 0xe6;
+                       format = NV902D_SET_DST_FORMAT_V_X8R8G8B8;
                        break;
                case 2: /* depth 30 */
-                       format = 0xd1;
+                       format = NV902D_SET_DST_FORMAT_V_A2B10G10R10;
                        break;
                default:
                        return -EINVAL;
@@ -186,74 +199,99 @@ nvc0_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }
 
-       ret = RING_SPACE(chan, 58);
+       ret = PUSH_WAIT(push, 52);
        if (ret) {
                WARN_ON(1);
                nouveau_fbcon_gpu_lockup(info);
                return ret;
        }
 
-       BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
-       OUT_RING  (chan, nfbdev->twod.handle);
-       BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
-       OUT_RING  (chan, 0);
-       BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
-       OUT_RING  (chan, 3);
-       BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
-       OUT_RING  (chan, 0x55);
-       BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
-       OUT_RING  (chan, 4);
-       OUT_RING  (chan, format);
-       BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
-       OUT_RING  (chan, 2);
-       OUT_RING  (chan, 1);
-
-       BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
-       OUT_RING  (chan, format);
-       BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
-       OUT_RING  (chan, format);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, info->fix.line_length);
-       OUT_RING  (chan, info->var.xres_virtual);
-       OUT_RING  (chan, info->var.yres_virtual);
-       OUT_RING  (chan, upper_32_bits(nfbdev->vma->addr));
-       OUT_RING  (chan, lower_32_bits(nfbdev->vma->addr));
-       BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
-       OUT_RING  (chan, format);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, 1);
-       OUT_RING  (chan, 0);
-       OUT_RING  (chan, info->fix.line_length);
-       OUT_RING  (chan, info->var.xres_virtual);
-       OUT_RING  (chan, info->var.yres_virtual);
-       OUT_RING  (chan, upper_32_bits(nfbdev->vma->addr));
-       OUT_RING  (chan, lower_32_bits(nfbdev->vma->addr));
-       FIRE_RING (chan);
+       PUSH_MTHD(push, NV902D, SET_OBJECT, nfbdev->twod.handle);
+
+       PUSH_MTHD(push, NV902D, SET_DST_FORMAT,
+                 NVVAL(NV902D, SET_DST_FORMAT, V, format),
+
+                               SET_DST_MEMORY_LAYOUT,
+                 NVDEF(NV902D, SET_DST_MEMORY_LAYOUT, V, PITCH));
+
+       PUSH_MTHD(push, NV902D, SET_DST_PITCH, info->fix.line_length,
+                               SET_DST_WIDTH, info->var.xres_virtual,
+                               SET_DST_HEIGHT, info->var.yres_virtual,
+
+                               SET_DST_OFFSET_UPPER,
+                 NVVAL(NV902D, SET_DST_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+                               SET_DST_OFFSET_LOWER,
+                 NVVAL(NV902D, SET_DST_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+       PUSH_MTHD(push, NV902D, SET_SRC_FORMAT,
+                 NVVAL(NV902D, SET_SRC_FORMAT, V, format),
+
+                               SET_SRC_MEMORY_LAYOUT,
+                 NVDEF(NV902D, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+
+       PUSH_MTHD(push, NV902D, SET_SRC_PITCH, info->fix.line_length,
+                               SET_SRC_WIDTH, info->var.xres_virtual,
+                               SET_SRC_HEIGHT, info->var.yres_virtual,
+
+                               SET_SRC_OFFSET_UPPER,
+                 NVVAL(NV902D, SET_SRC_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+                               SET_SRC_OFFSET_LOWER,
+                 NVVAL(NV902D, SET_SRC_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+       PUSH_IMMD(push, NV902D, SET_CLIP_ENABLE,
+                 NVDEF(NV902D, SET_CLIP_ENABLE, V, FALSE));
+
+       PUSH_IMMD(push, NV902D, SET_ROP,
+                 NVVAL(NV902D, SET_ROP, V, 0x55));
+
+       PUSH_IMMD(push, NV902D, SET_OPERATION,
+                 NVDEF(NV902D, SET_OPERATION, V, SRCCOPY));
+
+       PUSH_MTHD(push, NV902D, SET_MONOCHROME_PATTERN_COLOR_FORMAT,
+                 NVDEF(NV902D, SET_MONOCHROME_PATTERN_COLOR_FORMAT, V, A8R8G8B8),
+
+                               SET_MONOCHROME_PATTERN_FORMAT,
+                 NVDEF(NV902D, SET_MONOCHROME_PATTERN_FORMAT, V, LE_M1));
+
+       PUSH_MTHD(push, NV902D, RENDER_SOLID_PRIM_MODE,
+                 NVDEF(NV902D, RENDER_SOLID_PRIM_MODE, V, RECTS),
+
+                               SET_RENDER_SOLID_PRIM_COLOR_FORMAT,
+                 NVVAL(NV902D, SET_RENDER_SOLID_PRIM_COLOR_FORMAT, V, format));
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DATA_TYPE,
+                 NVDEF(NV902D, SET_PIXELS_FROM_CPU_DATA_TYPE, V, INDEX),
+
+                               SET_PIXELS_FROM_CPU_COLOR_FORMAT,
+                 NVVAL(NV902D, SET_PIXELS_FROM_CPU_COLOR_FORMAT, V, format),
+
+                               SET_PIXELS_FROM_CPU_INDEX_FORMAT,
+                 NVDEF(NV902D, SET_PIXELS_FROM_CPU_INDEX_FORMAT, V, I1),
+
+                               SET_PIXELS_FROM_CPU_MONO_FORMAT,
+                 NVDEF(NV902D, SET_PIXELS_FROM_CPU_MONO_FORMAT, V, CGA6_M1),
+
+                               SET_PIXELS_FROM_CPU_WRAP,
+                 NVDEF(NV902D, SET_PIXELS_FROM_CPU_WRAP, V, WRAP_BYTE));
+
+       PUSH_IMMD(push, NV902D, SET_PIXELS_FROM_CPU_MONO_OPACITY,
+                 NVDEF(NV902D, SET_PIXELS_FROM_CPU_MONO_OPACITY, V, OPAQUE));
+
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DX_DU_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DX_DU_INT, 1,
+                               SET_PIXELS_FROM_CPU_DY_DV_FRAC, 0,
+                               SET_PIXELS_FROM_CPU_DY_DV_INT, 1);
+
+       PUSH_IMMD(push, NV902D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP,
+                 NVDEF(NV902D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP, V, TRUE));
 
+       PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_DU_DX_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_DU_DX_INT, 1,
+                               SET_PIXELS_FROM_MEMORY_DV_DY_FRAC, 0,
+                               SET_PIXELS_FROM_MEMORY_DV_DY_INT, 1);
+       PUSH_KICK(push);
        return 0;
 }
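The hunk above replaces the old RING_SPACE()/BEGIN_NVC0()/OUT_RING()/FIRE_RING() calls with the PUSH_WAIT()/PUSH_MTHD()/PUSH_IMMD()/PUSH_KICK() helpers and the generated NV902D method names. As a rough mental model only (every name and the header encoding below are invented, this is not the nvif implementation): a push buffer reserves space, appends a method header followed by its data words, and is then kicked off to the GPU.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_push { uint32_t buf[64]; unsigned int cur, end; };

/* reserve space, in the spirit of PUSH_WAIT() */
static int toy_wait(struct toy_push *p, unsigned int dwords)
{
	return p->cur + dwords <= p->end ? 0 : -ENOSPC;
}

/* emit a made-up method header plus payload, in the spirit of PUSH_MTHD() */
static void toy_mthd(struct toy_push *p, uint16_t mthd, const uint32_t *data, unsigned int n)
{
	p->buf[p->cur++] = (uint32_t)n << 16 | mthd;
	while (n--)
		p->buf[p->cur++] = *data++;
}

int main(void)
{
	struct toy_push push = { .end = 64 };
	uint32_t fmt = 0xe6;			/* X8R8G8B8, as in the old code above */

	if (toy_wait(&push, 2))
		return 1;
	toy_mthd(&push, 0x0200, &fmt, 1);
	printf("%u dwords queued\n", push.cur);	/* a real PUSH_KICK() would submit here */
	return 0;
}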
 
index b797757..e1461c0 100644 (file)
  *
  * Authors: Ben Skeggs
  */
-
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
 #include "nv50_display.h"
 
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
 static int
 nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 {
-       int ret = RING_SPACE(chan, 6);
+       struct nvif_push *push = chan->chan.push;
+       int ret = PUSH_WAIT(push, 6);
        if (ret == 0) {
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
-               OUT_RING  (chan, upper_32_bits(virtual));
-               OUT_RING  (chan, lower_32_bits(virtual));
-               OUT_RING  (chan, sequence);
-               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
-               OUT_RING  (chan, 0x00000000);
-               FIRE_RING (chan);
+               PUSH_MTHD(push, NV906F, SEMAPHOREA,
+                         NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+                                       SEMAPHOREB, lower_32_bits(virtual),
+                                       SEMAPHOREC, sequence,
+
+                                       SEMAPHORED,
+                         NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+                         NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, EN) |
+                         NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE),
+
+                                       NON_STALL_INTERRUPT, 0);
+               PUSH_KICK(push);
        }
        return ret;
 }
@@ -47,15 +57,19 @@ nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 static int
 nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 {
-       int ret = RING_SPACE(chan, 5);
+       struct nvif_push *push = chan->chan.push;
+       int ret = PUSH_WAIT(push, 5);
        if (ret == 0) {
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(virtual));
-               OUT_RING  (chan, lower_32_bits(virtual));
-               OUT_RING  (chan, sequence);
-               OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
-                                NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
-               FIRE_RING (chan);
+               PUSH_MTHD(push, NV906F, SEMAPHOREA,
+                         NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+                                       SEMAPHOREB, lower_32_bits(virtual),
+                                       SEMAPHOREC, sequence,
+
+                                       SEMAPHORED,
+                         NVDEF(NV906F, SEMAPHORED, OPERATION, ACQ_GEQ) |
+                         NVDEF(NV906F, SEMAPHORED, ACQUIRE_SWITCH, ENABLED));
+               PUSH_KICK(push);
        }
        return ret;
 }
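NVVAL() and NVDEF() above come from the generated <nvhw/class/*.h> headers: as used here, NVVAL() places a caller-supplied value into a named register field and NVDEF() selects a named enumerant for it, and several fields of one word can be OR'd together (the SEMAPHORED word, for instance). A self-contained sketch of the same shift-and-mask idea, with entirely made-up field positions:

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: OPERATION in bits 0..2, RELEASE_WFI in bit 20, RELEASE_SIZE in bit 24 */
#define FIELD_VAL(lo, hi, v) \
	(((uint32_t)(v) << (lo)) & ((((uint32_t)1 << ((hi) - (lo) + 1)) - 1) << (lo)))

#define SEMD_OPERATION_RELEASE	FIELD_VAL(0, 2, 0x1)
#define SEMD_RELEASE_WFI_EN	FIELD_VAL(20, 20, 0x1)
#define SEMD_RELEASE_SIZE_16B	FIELD_VAL(24, 24, 0x0)

int main(void)
{
	uint32_t semd = SEMD_OPERATION_RELEASE |
			SEMD_RELEASE_WFI_EN |
			SEMD_RELEASE_SIZE_16B;

	printf("SEMAPHORED word: 0x%08x\n", semd);	/* 0x00100001 with this layout */
	return 0;
}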
index 12db549..12644f8 100644 (file)
@@ -48,9 +48,9 @@ nvif_client_resume(struct nvif_client *client)
 }
 
 void
-nvif_client_fini(struct nvif_client *client)
+nvif_client_dtor(struct nvif_client *client)
 {
-       nvif_object_fini(&client->object);
+       nvif_object_dtor(&client->object);
        if (client->driver) {
                if (client->driver->fini)
                        client->driver->fini(client->object.priv);
@@ -59,7 +59,7 @@ nvif_client_fini(struct nvif_client *client)
 }
 
 int
-nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
                 struct nvif_client *client)
 {
        struct nvif_client_v0 args = { .device = device };
@@ -70,8 +70,9 @@ nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
        int ret;
 
        strncpy(args.name, name, sizeof(args.name));
-       ret = nvif_object_init(parent != client ? &parent->object : NULL,
-                              0, NVIF_CLASS_CLIENT, &args, sizeof(args),
+       ret = nvif_object_ctor(parent != client ? &parent->object : NULL,
+                              name ? name : "nvifClient", 0,
+                              NVIF_CLASS_CLIENT, &args, sizeof(args),
                               &client->object);
        if (ret)
                return ret;
@@ -88,6 +89,6 @@ nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
        }
 
        if (ret)
-               nvif_client_fini(client);
+               nvif_client_dtor(client);
        return ret;
 }
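From here on the nvif _init()/_fini() pairs are renamed to _ctor()/_dtor(), and each constructor gains a debugging name that falls back to a per-class default when the caller passes NULL ("nvifClient" above, "nvifDevice", "nvifDisp" and so on below). A minimal stand-alone sketch of that convention, with an invented widget type:

#include <stdio.h>

struct widget { const char *name; };

static int widget_ctor(const char *name, struct widget *w)
{
	w->name = name ? name : "widgetDefault";	/* same fallback idiom as above */
	return 0;
}

static void widget_dtor(struct widget *w)
{
	w->name = NULL;		/* undo widget_ctor(), newest resources first */
}

int main(void)
{
	struct widget w;

	widget_ctor(NULL, &w);
	printf("%s\n", w.name);		/* prints "widgetDefault" */
	widget_dtor(&w);
	return 0;
}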
index 0e92db4..8c3d883 100644 (file)
@@ -39,20 +39,20 @@ nvif_device_time(struct nvif_device *device)
 }
 
 void
-nvif_device_fini(struct nvif_device *device)
+nvif_device_dtor(struct nvif_device *device)
 {
-       nvif_user_fini(device);
+       nvif_user_dtor(device);
        kfree(device->runlist);
        device->runlist = NULL;
-       nvif_object_fini(&device->object);
+       nvif_object_dtor(&device->object);
 }
 
 int
-nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
-                void *data, u32 size, struct nvif_device *device)
+nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
+                s32 oclass, void *data, u32 size, struct nvif_device *device)
 {
-       int ret = nvif_object_init(parent, handle, oclass, data, size,
-                                  &device->object);
+       int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
+                                  oclass, data, size, &device->object);
        device->runlist = NULL;
        device->user.func = NULL;
        if (ret == 0) {
index 61638b3..8d0d30e 100644 (file)
 void
 nvif_disp_dtor(struct nvif_disp *disp)
 {
-       nvif_object_fini(&disp->object);
+       nvif_object_dtor(&disp->object);
 }
 
 int
-nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
+nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
+              struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
                { TU102_DISP, -1 },
@@ -56,6 +57,6 @@ nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
        if (cid < 0)
                return cid;
 
-       return nvif_object_init(&device->object, 0, disps[cid].oclass,
-                               NULL, 0, &disp->object);
+       return nvif_object_ctor(&device->object, name ? name : "nvifDisp", 0,
+                               disps[cid].oclass, NULL, 0, &disp->object);
 }
index 7013309..5e00dd0 100644 (file)
@@ -53,6 +53,6 @@ nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
        }
 
        if (ret == 0)
-               ret = nvif_client_init(client, name, device, client);
+               ret = nvif_client_ctor(client, name, device, client);
        return ret;
 }
index b6ebb3b..0e1b7b4 100644 (file)
 #include <nvif/if000a.h>
 
 int
-nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
+nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
+                 struct nvif_mem *mem)
 {
-       int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
-                               size, NULL, 0, mem);
+       int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,
+                               0, size, NULL, 0, mem);
        if (ret == 0) {
                ret = nvif_object_map(&mem->object, NULL, 0);
                if (ret)
-                       nvif_mem_fini(mem);
+                       nvif_mem_dtor(mem);
        }
        return ret;
 }
 
 void
-nvif_mem_fini(struct nvif_mem *mem)
+nvif_mem_dtor(struct nvif_mem *mem)
 {
-       nvif_object_fini(&mem->object);
+       nvif_object_dtor(&mem->object);
 }
 
 int
-nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
-                  u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
+                  int type, u8 page, u64 size, void *argv, u32 argc,
+                  struct nvif_mem *mem)
 {
        struct nvif_mem_v0 *args;
        u8 stack[128];
@@ -67,8 +69,8 @@ nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
        args->size = size;
        memcpy(args->data, argv, argc);
 
-       ret = nvif_object_init(&mmu->object, 0, oclass, args,
-                              sizeof(*args) + argc, &mem->object);
+       ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass,
+                              args, sizeof(*args) + argc, &mem->object);
        if (ret == 0) {
                mem->type = mmu->type[type].type;
                mem->page = args->page;
@@ -83,8 +85,8 @@ nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
 }
 
 int
-nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
-             u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
+             u8 page, u64 size, void *argv, u32 argc, struct nvif_mem *mem)
 {
        int ret = -EINVAL, i;
 
@@ -92,8 +94,8 @@ nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
 
        for (i = 0; ret && i < mmu->type_nr; i++) {
                if ((mmu->type[i].type & type) == type) {
-                       ret = nvif_mem_init_type(mmu, oclass, i, page, size,
-                                                argv, argc, mem);
+                       ret = nvif_mem_ctor_type(mmu, name, oclass, i, page,
+                                                size, argv, argc, mem);
                }
        }
 
index 47efc40..3709cbb 100644 (file)
 #include <nvif/if0008.h>
 
 void
-nvif_mmu_fini(struct nvif_mmu *mmu)
+nvif_mmu_dtor(struct nvif_mmu *mmu)
 {
        kfree(mmu->kind);
        kfree(mmu->type);
        kfree(mmu->heap);
-       nvif_object_fini(&mmu->object);
+       nvif_object_dtor(&mmu->object);
 }
 
 int
-nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+nvif_mmu_ctor(struct nvif_object *parent, const char *name, s32 oclass,
+             struct nvif_mmu *mmu)
 {
        static const struct nvif_mclass mems[] = {
                { NVIF_CLASS_MEM_GF100, -1 },
@@ -50,8 +51,8 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
        mmu->type = NULL;
        mmu->kind = NULL;
 
-       ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
-                              &mmu->object);
+       ret = nvif_object_ctor(parent, name ? name : "nvifMmu", 0, oclass,
+                              &args, sizeof(args), &mmu->object);
        if (ret)
                goto done;
 
@@ -127,6 +128,6 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
 
 done:
        if (ret)
-               nvif_mmu_fini(mmu);
+               nvif_mmu_dtor(mmu);
        return ret;
 }
index 278b393..143c8dc 100644 (file)
@@ -142,7 +142,7 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
 }
 
 int
-nvif_notify_fini(struct nvif_notify *notify)
+nvif_notify_dtor(struct nvif_notify *notify)
 {
        struct nvif_object *object = notify->object;
        struct {
@@ -162,9 +162,9 @@ nvif_notify_fini(struct nvif_notify *notify)
 }
 
 int
-nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
-                bool work, u8 event, void *data, u32 size, u32 reply,
-                struct nvif_notify *notify)
+nvif_notify_ctor(struct nvif_object *object, const char *name,
+                int (*func)(struct nvif_notify *), bool work, u8 event,
+                void *data, u32 size, u32 reply, struct nvif_notify *notify)
 {
        struct {
                struct nvif_ioctl_v0 ioctl;
@@ -174,6 +174,7 @@ nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
        int ret = -ENOMEM;
 
        notify->object = object;
+       notify->name = name ? name : "nvifNotify";
        notify->flags = 0;
        atomic_set(&notify->putcnt, 1);
        notify->func = func;
@@ -204,6 +205,6 @@ nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
        kfree(args);
 done:
        if (ret)
-               nvif_notify_fini(notify);
+               nvif_notify_dtor(notify);
        return ret;
 }
index ef3f628..671a5c0 100644 (file)
@@ -242,7 +242,7 @@ nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
 }
 
 void
-nvif_object_fini(struct nvif_object *object)
+nvif_object_dtor(struct nvif_object *object)
 {
        struct {
                struct nvif_ioctl_v0 ioctl;
@@ -260,8 +260,8 @@ nvif_object_fini(struct nvif_object *object)
 }
 
 int
-nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
-                void *data, u32 size, struct nvif_object *object)
+nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
+                s32 oclass, void *data, u32 size, struct nvif_object *object)
 {
        struct {
                struct nvif_ioctl_v0 ioctl;
@@ -270,6 +270,7 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
        int ret = 0;
 
        object->client = NULL;
+       object->name = name ? name : "nvifObject";
        object->handle = handle;
        object->oclass = oclass;
        object->map.ptr = NULL;
@@ -277,10 +278,12 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
 
        if (parent) {
                if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
-                       nvif_object_fini(object);
+                       nvif_object_dtor(object);
                        return -ENOMEM;
                }
 
+               object->parent = parent->parent;
+
                args->ioctl.version = 0;
                args->ioctl.type = NVIF_IOCTL_V0_NEW;
                args->new.version = 0;
@@ -300,6 +303,6 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
        }
 
        if (ret)
-               nvif_object_fini(object);
+               nvif_object_dtor(object);
        return ret;
 }
index 10da3cd..d89f5b6 100644 (file)
 #include <nvif/class.h>
 
 void
-nvif_user_fini(struct nvif_device *device)
+nvif_user_dtor(struct nvif_device *device)
 {
        if (device->user.func) {
-               nvif_object_fini(&device->user.object);
+               nvif_object_dtor(&device->user.object);
                device->user.func = NULL;
        }
 }
 
 int
-nvif_user_init(struct nvif_device *device)
+nvif_user_ctor(struct nvif_device *device, const char *name)
 {
        struct {
                s32 oclass;
@@ -53,7 +53,8 @@ nvif_user_init(struct nvif_device *device)
        if (cid < 0)
                return cid;
 
-       ret = nvif_object_init(&device->object, 0, users[cid].oclass, NULL, 0,
+       ret = nvif_object_ctor(&device->object, name ? name : "nvifUsermode",
+                              0, users[cid].oclass, NULL, 0,
                               &device->user.object);
        if (ret)
                return ret;
index 11487c0..6053d6d 100644 (file)
@@ -105,15 +105,15 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
 }
 
 void
-nvif_vmm_fini(struct nvif_vmm *vmm)
+nvif_vmm_dtor(struct nvif_vmm *vmm)
 {
        kfree(vmm->page);
-       nvif_object_fini(&vmm->object);
+       nvif_object_dtor(&vmm->object);
 }
 
 int
-nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
-             u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
+             u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
 {
        struct nvif_vmm_v0 *args;
        u32 argn = sizeof(*args) + argc;
@@ -130,8 +130,8 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
        args->size = size;
        memcpy(args->data, argv, argc);
 
-       ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
-                              &vmm->object);
+       ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
+                              oclass, args, argn, &vmm->object);
        if (ret)
                goto done;
 
@@ -163,7 +163,7 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
 
 done:
        if (ret)
-               nvif_vmm_fini(vmm);
+               nvif_vmm_dtor(vmm);
        kfree(args);
        return ret;
 }
index 5b90c2a..9f4ac26 100644 (file)
@@ -2046,7 +2046,7 @@ nv120_chipset = {
        .mmu = gm200_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = gk104_pci_new,
-       .pmu = gm107_pmu_new,
+       .pmu = gm200_pmu_new,
        .therm = gm200_therm_new,
        .timer = gk20a_timer_new,
        .top = gk104_top_new,
@@ -2084,7 +2084,7 @@ nv124_chipset = {
        .mmu = gm200_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = gk104_pci_new,
-       .pmu = gm107_pmu_new,
+       .pmu = gm200_pmu_new,
        .therm = gm200_therm_new,
        .timer = gk20a_timer_new,
        .top = gk104_top_new,
@@ -2122,7 +2122,7 @@ nv126_chipset = {
        .mmu = gm200_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = gk104_pci_new,
-       .pmu = gm107_pmu_new,
+       .pmu = gm200_pmu_new,
        .therm = gm200_therm_new,
        .timer = gk20a_timer_new,
        .top = gk104_top_new,
@@ -2184,7 +2184,7 @@ nv130_chipset = {
        .mmu = gp100_mmu_new,
        .therm = gp100_therm_new,
        .pci = gp100_pci_new,
-       .pmu = gp100_pmu_new,
+       .pmu = gm200_pmu_new,
        .timer = gk20a_timer_new,
        .top = gk104_top_new,
        .ce[0] = gp100_ce_new,
index e55054b..9cf2cfe 100644 (file)
@@ -21,6 +21,8 @@ struct nv50_disp_chan {
 
        struct nvkm_memory *memory;
        u64 push;
+
+       u32 suspend_put;
 };
 
 struct nv50_disp_chan_func {
index d162b9c..689e3cd 100644 (file)
@@ -182,6 +182,8 @@ gf119_disp_core_fini(struct nv50_disp_chan *chan)
                nvkm_error(subdev, "core fini: %08x\n",
                           nvkm_rd32(device, 0x610490));
        }
+
+       chan->suspend_put = nvkm_rd32(device, 0x640000);
 }
 
 static int
@@ -195,7 +197,7 @@ gf119_disp_core_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610498, 0x00010000);
        nvkm_wr32(device, 0x61049c, 0x00000001);
        nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000, 0x00000000);
+       nvkm_wr32(device, 0x640000, chan->suspend_put);
        nvkm_wr32(device, 0x610490, 0x01000013);
 
        /* wait for it to go inactive */
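This gf119 hunk, and the matching gp102/gv100/nv50 core and dmac hunks that follow, stop zeroing the display channel's PUT pointer on init: fini now reads the current value back into chan->suspend_put and init writes it back, so the channel resumes where it stopped across suspend. A toy model of the save/restore pattern (the regs[] array stands in for MMIO and is not how nvkm touches hardware):

#include <stdint.h>

static uint32_t regs[0x1000];	/* fake register file, indexed by offset */

static uint32_t rd32(uint32_t addr)             { return regs[addr & 0xfff]; }
static void     wr32(uint32_t addr, uint32_t v) { regs[addr & 0xfff] = v; }

struct chan { uint32_t suspend_put; };

static void chan_fini(struct chan *chan)
{
	chan->suspend_put = rd32(0x640000);	/* remember PUT before teardown */
}

static void chan_init(struct chan *chan)
{
	wr32(0x640000, chan->suspend_put);	/* restore it, instead of writing 0 */
}

int main(void)
{
	struct chan chan = { 0 };

	wr32(0x640000, 0x80);	/* pretend the channel had advanced to 0x80 */
	chan_fini(&chan);
	chan_init(&chan);
	return rd32(0x640000) == 0x80 ? 0 : 1;
}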
index 5b7f993..1b435be 100644 (file)
@@ -36,7 +36,7 @@ gp102_disp_core_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x611498, 0x00010000);
        nvkm_wr32(device, 0x61149c, 0x00000001);
        nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000, 0x00000000);
+       nvkm_wr32(device, 0x640000, chan->suspend_put);
        nvkm_wr32(device, 0x610490, 0x01000013);
 
        /* wait for it to go inactive */
index 4592d0e..e20a48f 100644 (file)
@@ -167,6 +167,7 @@ gv100_disp_core_fini(struct nv50_disp_chan *chan)
        nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
        gv100_disp_core_idle(chan);
        nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
+       chan->suspend_put = nvkm_rd32(device, 0x680000);
 }
 
 static int
@@ -181,7 +182,7 @@ gv100_disp_core_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610b2c, 0x00000040);
 
        nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x680000, 0x00000000);
+       nvkm_wr32(device, 0x680000, chan->suspend_put);
        nvkm_wr32(device, 0x6104e0, 0x00000013);
        return gv100_disp_core_idle(chan);
 }
index 55db9a2..660310b 100644 (file)
@@ -179,6 +179,8 @@ nv50_disp_core_fini(struct nv50_disp_chan *chan)
                nvkm_error(subdev, "core fini: %08x\n",
                           nvkm_rd32(device, 0x610200));
        }
+
+       chan->suspend_put = nvkm_rd32(device, 0x640000);
 }
 
 static int
@@ -198,7 +200,7 @@ nv50_disp_core_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610208, 0x00010000);
        nvkm_wr32(device, 0x61020c, 0x00000000);
        nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000, 0x00000000);
+       nvkm_wr32(device, 0x640000, chan->suspend_put);
        nvkm_wr32(device, 0x610200, 0x01000013);
 
        /* wait for it to go inactive */
index edf7dd0..76425e8 100644 (file)
@@ -53,6 +53,8 @@ gf119_disp_dmac_fini(struct nv50_disp_chan *chan)
                nvkm_error(subdev, "ch %d fini: %08x\n", user,
                           nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
        }
+
+       chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
 }
 
 static int
@@ -68,7 +70,7 @@ gf119_disp_dmac_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
        nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
        nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
        nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
        /* wait for it to go inactive */
index f21a433..da258df 100644 (file)
@@ -38,7 +38,7 @@ gp102_disp_dmac_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
        nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
        nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
        nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
        /* wait for it to go inactive */
index eac0e42..fdb624a 100644 (file)
@@ -50,10 +50,12 @@ void
 gv100_disp_dmac_fini(struct nv50_disp_chan *chan)
 {
        struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+       const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
        const u32 coff = chan->chid.ctrl * 0x04;
        nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
        gv100_disp_dmac_idle(chan);
        nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
+       chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
 }
 
 int
@@ -71,7 +73,7 @@ gv100_disp_dmac_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610b2c + poff, 0x00000040);
 
        nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x690000 + uoff, 0x00000000);
+       nvkm_wr32(device, 0x690000 + uoff, chan->suspend_put);
        nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
        return gv100_disp_dmac_idle(chan);
 }
index 9e8a9d7..d0a7da9 100644 (file)
@@ -94,6 +94,8 @@ nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
                nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
                           nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
        }
+
+       chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
 }
 
 static int
@@ -109,7 +111,7 @@ nv50_disp_dmac_init(struct nv50_disp_chan *chan)
        nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
        nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
        nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
-       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+       nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
        nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
 
        /* wait for it to go inactive */
index 7147dc6..1ccfc83 100644 (file)
@@ -23,55 +23,55 @@ void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
                 */
        case 17:
                subpack1_high = (raw_frame[16] << 16);
-               /* fall through */
+               fallthrough;
        case 16:
                subpack1_high |= (raw_frame[15] << 8);
-               /* fall through */
+               fallthrough;
        case 15:
                subpack1_high |= raw_frame[14];
-               /* fall through */
+               fallthrough;
        case 14:
                subpack1_low = (raw_frame[13] << 24);
-               /* fall through */
+               fallthrough;
        case 13:
                subpack1_low |= (raw_frame[12] << 16);
-               /* fall through */
+               fallthrough;
        case 12:
                subpack1_low |= (raw_frame[11] << 8);
-               /* fall through */
+               fallthrough;
        case 11:
                subpack1_low |= raw_frame[10];
-               /* fall through */
+               fallthrough;
        case 10:
                subpack0_high = (raw_frame[9] << 16);
-               /* fall through */
+               fallthrough;
        case 9:
                subpack0_high |= (raw_frame[8] << 8);
-               /* fall through */
+               fallthrough;
        case 8:
                subpack0_high |= raw_frame[7];
-               /* fall through */
+               fallthrough;
        case 7:
                subpack0_low = (raw_frame[6] << 24);
-               /* fall through */
+               fallthrough;
        case 6:
                subpack0_low |= (raw_frame[5] << 16);
-               /* fall through */
+               fallthrough;
        case 5:
                subpack0_low |= (raw_frame[4] << 8);
-               /* fall through */
+               fallthrough;
        case 4:
                subpack0_low |= raw_frame[3];
-               /* fall through */
+               fallthrough;
        case 3:
                header = (raw_frame[2] << 16);
-               /* fall through */
+               fallthrough;
        case 2:
                header |= (raw_frame[1] << 8);
-               /* fall through */
+               fallthrough;
        case 1:
                header |= raw_frame[0];
-               /* fall through */
+               fallthrough;
        case 0:
                break;
        }
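The switch above (and several below) replaces the old fall-through comments with the kernel's fallthrough pseudo-keyword, which compilers that support the attribute can verify with -Wimplicit-fallthrough. A small stand-alone example of the same construct, assuming GCC 7+ or a recent Clang; the kernel's own definition lives in its compiler headers:

#include <stdio.h>

#ifndef fallthrough
#define fallthrough	__attribute__((__fallthrough__))
#endif

static unsigned int accumulate(int n)
{
	unsigned int v = 0;

	switch (n) {
	case 2:
		v |= 0x20;
		fallthrough;	/* intentional: case 2 also takes case 1's bit */
	case 1:
		v |= 0x01;
		break;
	default:
		break;
	}
	return v;
}

int main(void)
{
	printf("%#x\n", accumulate(2));	/* prints 0x21 */
	return 0;
}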
index bf6d41f..bb32bef 100644 (file)
@@ -24,7 +24,7 @@
 #include "hdmi.h"
 
 void
-gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
+gm200_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(ior);
index 1a200a9..09f3038 100644 (file)
@@ -66,7 +66,7 @@ struct nvkm_ior_func {
                void (*ctrl)(struct nvkm_ior *, int head, bool enable,
                             u8 max_ac_packet, u8 rekey, u8 *avi, u8 avi_size,
                             u8 *vendor, u8 vendor_size);
-               void (*scdc)(struct nvkm_ior *, int head, u8 scdc);
+               void (*scdc)(struct nvkm_ior *, u8 scdc);
        } hdmi;
 
        struct {
@@ -156,7 +156,7 @@ void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 
-void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
+void gm200_hdmi_scdc(struct nvkm_ior *, u8);
 
 void gt215_hda_hpd(struct nvkm_ior *, int, bool);
 void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
index fb5de44..ecde98d 100644 (file)
@@ -205,8 +205,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
                                           vendor, vendor_size);
 
                if (outp->ior->func->hdmi.scdc)
-                       outp->ior->func->hdmi.scdc(
-                                       outp->ior, hidx, args->v0.scdc);
+                       outp->ior->func->hdmi.scdc(outp->ior, args->v0.scdc);
 
                return 0;
        }
index 7f1adab..5159d5d 100644 (file)
@@ -122,7 +122,7 @@ nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
                break;
        case NV_MEM_ACCESS_WO:
                dmaobj->flags0 |= 0x00008000;
-               /* fall through */
+               fallthrough;
        case NV_MEM_ACCESS_RW:
                dmaobj->flags2 |= 0x00000002;
                break;
index 93493b3..c1d1b1a 100644 (file)
@@ -117,10 +117,10 @@ nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
        switch (mthd) {
        case 0x0000 ... 0x0000: /* subchannel's engine -> software */
                nvkm_wr32(device, 0x003280, (engine &= ~mask));
-               /* fall through */
+               fallthrough;
        case 0x0180 ... 0x01fc: /* handle -> instance */
                data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
-               /* fall through */
+               fallthrough;
        case 0x0100 ... 0x017c:
        case 0x0200 ... 0x1ffc: /* pass method down to sw */
                if (!(engine & mask) && sw)
index 47c1682..2d61fd8 100644 (file)
@@ -81,7 +81,7 @@ nv40_fifo_init(struct nvkm_fifo *base)
        case 0x49:
        case 0x4b:
                nvkm_wr32(device, 0x002230, 0x00000001);
-               /* fall through */
+               fallthrough;
        case 0x40:
        case 0x41:
        case 0x42:
index f2f5636..749f73f 100644 (file)
@@ -741,7 +741,7 @@ gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd)
        return -ETIMEDOUT;
 }
 
-int
+static int
 gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
 {
        struct gf100_gr *gr = gf100_gr(base);
@@ -756,7 +756,7 @@ gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
        return ret;
 }
 
-int
+static int
 gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base)
 {
        struct gf100_gr *gr = gf100_gr(base);
@@ -2032,7 +2032,7 @@ gf100_gr_fini(struct nvkm_gr *base, bool suspend)
        return 0;
 }
 
-void *
+static void *
 gf100_gr_dtor(struct nvkm_gr *base)
 {
        struct gf100_gr *gr = gf100_gr(base);
@@ -2103,7 +2103,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif,
 
        fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
        if (IS_ERR(fwif))
-               return -ENODEV;
+               return PTR_ERR(fwif);
 
        gr->func = fwif->func;
 
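The gf100.c hunk above also fixes error propagation: nvkm_firmware_load() hands back an ERR_PTR()-encoded pointer, and the probe path now forwards PTR_ERR(fwif) instead of flattening every failure to -ENODEV. A simplified userspace rendering of the <linux/err.h> idiom (the real macros differ only in detail):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *load_firmware(int simulate_error)
{
	if (simulate_error)
		return ERR_PTR(-EINVAL);	/* e.g. a corrupt image, not merely a missing one */
	return "loaded";
}

int main(void)
{
	void *fw = load_firmware(1);

	if (IS_ERR(fw)) {
		printf("load failed: %ld\n", PTR_ERR(fw));	/* -22, preserved for the caller */
		return 1;
	}
	return 0;
}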
index 88bcb57..dfd5dd7 100644 (file)
@@ -404,6 +404,7 @@ int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
 
 int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
 
+int gm200_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
 int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
 extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
 extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
index e56880f..6d4d728 100644 (file)
@@ -33,7 +33,7 @@ struct gk20a_fw_av
        u32 data;
 };
 
-int
+static int
 gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
                    int ver, struct gf100_gr_pack **ppack)
 {
@@ -83,7 +83,7 @@ struct gk20a_fw_aiv
        u32 data;
 };
 
-int
+static int
 gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
                     int ver, struct gf100_gr_pack **ppack)
 {
@@ -126,7 +126,7 @@ end:
        return ret;
 }
 
-int
+static int
 gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
                      int ver, struct gf100_gr_pack **ppack)
 {
index 3d67cfb..8151370 100644 (file)
 
 #include <nvif/class.h>
 
+int
+gm200_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+       nvkm_warn(&gr->base.engine.subdev, "firmware unavailable\n");
+       return -ENODEV;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
@@ -275,7 +282,8 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gm200_gr_fwif[] = {
-       { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
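These firmware-interface (fwif) tables are scanned in order, and the new { -1, *_nofw } rows act as catch-alls when no firmware file can be loaded: gm200_gr_nofw() above warns and returns -ENODEV, while the gp102_sec2_nofw() added further down warns and returns 0 so the device can still probe. A hedged sketch of the table scan; the pick() helper is a simplification, not nvkm_firmware_load():

#include <stdio.h>

struct fwif {
	int version;
	int (*load)(void);
};

static int load_fw(void)   { return -1; /* pretend the firmware files are missing */ }
static int load_nofw(void) { printf("firmware unavailable\n"); return 0; }

static const struct fwif table[] = {
	{  0, load_fw },
	{ -1, load_nofw },	/* catch-all fallback, like the { -1, ... } rows above */
	{ 0 }			/* sentinel */
};

static int pick(const struct fwif *fwif)
{
	for (; fwif->load; fwif++) {
		if (fwif->load() == 0)
			return fwif->version;
	}
	return -1;
}

int main(void)
{
	printf("selected entry: version %d\n", pick(table));	/* -1, the nofw fallback */
	return 0;
}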
 
index 09d8c5d..1aab691 100644 (file)
@@ -175,7 +175,8 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gm20b_gr_fwif[] = {
-       { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+       {  0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 33c8634..ddba7ce 100644 (file)
@@ -150,7 +150,8 @@ MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp100_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 7baf67f..c083f37 100644 (file)
@@ -146,7 +146,8 @@ MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp102_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index d9b8ef8..f6a31e9 100644 (file)
@@ -87,7 +87,8 @@ MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp104_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 2b1ad55..2c80c6a 100644 (file)
@@ -76,7 +76,8 @@ MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp107_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 113e4c1..2be8f41 100644 (file)
@@ -86,7 +86,8 @@ MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp108_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index eaf913e..6edc4bc 100644 (file)
@@ -88,7 +88,8 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gp10b_gr_fwif[] = {
-       { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 70639d8..2189a8f 100644 (file)
@@ -135,7 +135,8 @@ MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 gv100_gr_fwif[] = {
-       { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       {  0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index a9efa4d..6039f99 100644 (file)
@@ -192,7 +192,8 @@ MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin");
 
 static const struct gf100_gr_fwif
 tu102_gr_fwif[] = {
-       { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       {  0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+       { -1, gm200_gr_nofw },
        {}
 };
 
index 368f2a0..bccf7ac 100644 (file)
 #include <nvfw/flcn.h>
 #include <nvfw/sec2.h>
 
+int
+gp102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+               const struct nvkm_sec2_fwif *fwif)
+{
+       nvkm_warn(&sec2->engine.subdev, "firmware unavailable\n");
+       return 0;
+}
+
 static int
-gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nvfw_falcon_msg *hdr)
 {
        struct nv_sec2_acr_bootstrap_falcon_msg *msg =
                container_of(hdr, typeof(*msg), msg.hdr);
@@ -115,6 +123,9 @@ gp102_sec2_acr_0 = {
        .bld_write = gp102_sec2_acr_bld_write,
        .bld_patch = gp102_sec2_acr_bld_patch,
        .boot = gp102_sec2_acr_boot,
+       .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+                            BIT_ULL(NVKM_ACR_LSF_GPCCS) |
+                            BIT_ULL(NVKM_ACR_LSF_SEC2),
        .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
 };
 
@@ -294,6 +305,9 @@ gp102_sec2_acr_1 = {
        .bld_write = gp102_sec2_acr_bld_write_1,
        .bld_patch = gp102_sec2_acr_bld_patch_1,
        .boot = gp102_sec2_acr_boot,
+       .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+                            BIT_ULL(NVKM_ACR_LSF_GPCCS) |
+                            BIT_ULL(NVKM_ACR_LSF_SEC2),
        .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
 };
 
@@ -322,8 +336,9 @@ MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
 
 static const struct nvkm_sec2_fwif
 gp102_sec2_fwif[] = {
-       { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
-       { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+       {  1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+       {  0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+       { -1, gp102_sec2_nofw, &gp102_sec2 },
        {}
 };
 
index bb88117..8cbc0b7 100644 (file)
@@ -20,6 +20,7 @@ struct nvkm_sec2_fwif {
        const struct nvkm_acr_lsf_func *acr;
 };
 
+int gp102_sec2_nofw(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
 int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
 extern const struct nvkm_sec2_func gp102_sec2;
 extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
index a829565..a231c1c 100644 (file)
@@ -49,13 +49,6 @@ tu102_sec2 = {
        .initmsg = gp102_sec2_initmsg,
 };
 
-static int
-tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
-               const struct nvkm_sec2_fwif *fwif)
-{
-       return 0;
-}
-
 MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
@@ -75,7 +68,7 @@ MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
 static const struct nvkm_sec2_fwif
 tu102_sec2_fwif[] = {
        {  0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
-       { -1, tu102_sec2_nofw, &tu102_sec2 }
+       { -1, gp102_sec2_nofw, &tu102_sec2 }
 };
 
 int
index 40e3f3f..44cf6a8 100644 (file)
@@ -58,7 +58,7 @@ nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
 static void
 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
 {
-       struct nv_falcon_cmd cmd;
+       struct nvfw_falcon_cmd cmd;
 
        cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
        cmd.size = sizeof(cmd);
@@ -97,7 +97,7 @@ nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
 }
 
 static int
-nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
 {
        static unsigned timeout = 2000;
        unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
@@ -121,7 +121,7 @@ nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
 #define CMD_FLAGS_INTR BIT(1)
 
 int
-nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
                      nvkm_falcon_qmgr_callback cb, void *priv,
                      unsigned long timeout)
 {
index cbfe09a..e74371d 100644 (file)
@@ -74,7 +74,7 @@ nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
 }
 
 static int
-nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
 {
        int ret = 0;
 
@@ -112,7 +112,7 @@ close:
 }
 
 static int
-nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
 {
        struct nvkm_falcon_qmgr_seq *seq;
 
@@ -144,7 +144,7 @@ nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
         * stack space to work with.
         */
        u8 msg_buffer[MSG_BUF_SIZE];
-       struct nv_falcon_msg *hdr = (void *)msg_buffer;
+       struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
 
        while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
                nvkm_falcon_msgq_exec(msgq, hdr);
@@ -155,7 +155,7 @@ nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
                              void *data, u32 size)
 {
        struct nvkm_falcon *falcon = msgq->qmgr->falcon;
-       struct nv_falcon_msg *hdr = data;
+       struct nvfw_falcon_msg *hdr = data;
        int ret;
 
        msgq->head_reg = falcon->func->msgq.head;
index a45cd70..976cb7b 100644 (file)
@@ -3,7 +3,7 @@
 #define __NVKM_FALCON_QMGR_H__
 #include <core/falcon.h>
 
-#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define HDR_SIZE sizeof(struct nvfw_falcon_msg)
 #define QUEUE_ALIGNMENT 4
 /* max size of the messages we can receive */
 #define MSG_BUF_SIZE 128
index 0d063b8..bef790a 100644 (file)
@@ -45,9 +45,8 @@ wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
        nvkm_debug(subdev, "\tstatus        : %d\n", hdr->status);
 }
 
-void
-lsb_header_tail_dump(struct nvkm_subdev *subdev,
-                       struct lsb_header_tail *hdr)
+static void
+lsb_header_tail_dump(struct nvkm_subdev *subdev, struct lsb_header_tail *hdr)
 {
        nvkm_debug(subdev, "lsbHeader\n");
        nvkm_debug(subdev, "\tucodeOff      : 0x%x\n", hdr->ucode_off);
index e4866a0..c962df9 100644 (file)
@@ -141,14 +141,24 @@ nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
        struct nvkm_acr *acr = device->acr;
        unsigned long id;
 
+       /* If there's no LS FW managing bootstrapping of other LS falcons,
+        * we depend on the HS firmware being able to do it instead.
+        */
        if (!acrflcn) {
-               int ret = nvkm_acr_reload(acr);
-               if (ret)
-                       return ret;
+               /* Which isn't possible everywhere... */
+               if ((mask & acr->func->bootstrap_falcons) == mask) {
+                       int ret = nvkm_acr_reload(acr);
+                       if (ret)
+                               return ret;
 
-               return acr->done ? 0 : -EINVAL;
+                       return acr->done ? 0 : -EINVAL;
+               }
+               return -ENOSYS;
        }
 
+       if ((mask & acrflcn->func->bootstrap_falcons) != mask)
+               return -ENOSYS;
+
        if (acrflcn->func->bootstrap_multiple_falcons) {
                return acrflcn->func->
                        bootstrap_multiple_falcons(acrflcn->falcon, mask);
@@ -167,13 +177,10 @@ bool
 nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
 {
        struct nvkm_acr *acr = device->acr;
-       struct nvkm_acr_lsf *lsf;
 
        if (acr) {
-               list_for_each_entry(lsf, &acr->lsf, head) {
-                       if (lsf->id == id)
-                               return true;
-               }
+               if (acr->managed_falcons & BIT_ULL(id))
+                       return true;
        }
 
        return false;
@@ -213,6 +220,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
        struct nvkm_acr_lsfw *lsfw, *lsft;
        struct nvkm_acr_lsf *lsf;
        u32 wpr_size = 0;
+       u64 falcons;
        int ret, i;
 
        if (list_empty(&acr->hsfw)) {
@@ -248,12 +256,28 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
                lsf->falcon = lsfw->falcon;
                lsf->id = lsfw->id;
                list_add_tail(&lsf->head, &acr->lsf);
+               acr->managed_falcons |= BIT_ULL(lsf->id);
        }
 
        /* Ensure the falcon that'll provide ACR functions is booted first. */
        lsf = nvkm_acr_falcon(device);
-       if (lsf)
+       if (lsf) {
+               falcons = lsf->func->bootstrap_falcons;
                list_move(&lsf->head, &acr->lsf);
+       } else {
+               falcons = acr->func->bootstrap_falcons;
+       }
+
+       /* Cull falcons that can't be bootstrapped, or the HSFW can fail to
+        * boot and leave the GPU in a weird state.
+        */
+       list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+               if (!(falcons & BIT_ULL(lsfw->id))) {
+                       nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
+                                 nvkm_acr_lsf_id(lsfw->id));
+                       nvkm_acr_lsfw_del(lsfw);
+               }
+       }
 
        if (!acr->wpr_fw || acr->wpr_comp)
                wpr_size = acr->func->wpr_layout(acr);
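The ACR rework above tracks which LS falcons can be bootstrapped as a BIT_ULL() mask (acr->managed_falcons, func->bootstrap_falcons) and refuses a request with -ENOSYS unless every requested falcon is covered; lsfw entries that cannot be bootstrapped are culled up front. The subset test itself is plain bit arithmetic, sketched below with illustrative IDs rather than the real enum nvkm_acr_lsf_id values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

enum { LSF_FECS = 1, LSF_GPCCS = 2, LSF_SEC2 = 3 };	/* example IDs only */

static int can_bootstrap(uint64_t supported, uint64_t requested)
{
	if ((requested & supported) != requested)
		return -ENOSYS;		/* at least one requested falcon isn't supported */
	return 0;
}

int main(void)
{
	uint64_t supported = BIT_ULL(LSF_FECS) | BIT_ULL(LSF_GPCCS);

	printf("%d\n", can_bootstrap(supported, BIT_ULL(LSF_FECS)));	/* 0 */
	printf("%d\n", can_bootstrap(supported, BIT_ULL(LSF_SEC2)));	/* -ENOSYS (-38 on Linux) */
	return 0;
}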
index 9a63940..cd41b2e 100644 (file)
 #include <nvfw/acr.h>
 #include <nvfw/flcn.h>
 
+const struct nvkm_acr_func
+gm200_acr = {
+};
+
+int
+gm200_acr_nofw(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+       nvkm_warn(&acr->subdev, "firmware unavailable\n");
+       return 0;
+}
+
 int
 gm200_acr_init(struct nvkm_acr *acr)
 {
@@ -425,7 +436,7 @@ gm200_acr_load_fwif[] = {
 };
 
 static const struct nvkm_acr_func
-gm200_acr = {
+gm200_acr_0 = {
        .load = gm200_acr_load_fwif,
        .unload = gm200_acr_unload_fwif,
        .wpr_parse = gm200_acr_wpr_parse,
@@ -435,6 +446,8 @@ gm200_acr = {
        .wpr_patch = gm200_acr_wpr_patch,
        .wpr_check = gm200_acr_wpr_check,
        .init = gm200_acr_init,
+       .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+                            BIT_ULL(NVKM_ACR_LSF_GPCCS),
 };
 
 static int
@@ -459,7 +472,8 @@ gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
 
 static const struct nvkm_acr_fwif
 gm200_acr_fwif[] = {
-       { 0, gm200_acr_load, &gm200_acr },
+       {  0, gm200_acr_load, &gm200_acr_0 },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index 034a6ed..b1ecc58 100644 (file)
@@ -123,7 +123,8 @@ gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
 
 static const struct nvkm_acr_fwif
 gm20b_acr_fwif[] = {
-       { 0, gm20b_acr_load, &gm20b_acr },
+       {  0, gm20b_acr_load, &gm20b_acr },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index 49e11c4..80eb9d8 100644 (file)
@@ -270,7 +270,8 @@ gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
 
 static const struct nvkm_acr_fwif
 gp102_acr_fwif[] = {
-       { 0, gp102_acr_load, &gp102_acr },
+       {  0, gp102_acr_load, &gp102_acr },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index f10dc91..67a7c14 100644 (file)
@@ -100,7 +100,8 @@ gp108_acr = {
 
 static const struct nvkm_acr_fwif
 gp108_acr_fwif[] = {
-       { 0, gp102_acr_load, &gp108_acr },
+       {  0, gp102_acr_load, &gp108_acr },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index 39de642..8249f0d 100644 (file)
@@ -46,7 +46,8 @@ gp10b_acr = {
 
 static const struct nvkm_acr_fwif
 gp10b_acr_fwif[] = {
-       { 0, gm20b_acr_load, &gp10b_acr },
+       {  0, gm20b_acr_load, &gp10b_acr },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index 07d1830..9b1cf67 100644 (file)
@@ -60,7 +60,7 @@ nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
 {
        struct nvkm_acr_lsfw *lsfw;
 
-       if (!acr)
+       if (!acr || list_empty(&acr->hsfw))
                return ERR_PTR(-ENOSYS);
 
        lsfw = nvkm_acr_lsfw_get(acr, id);
index d8ba728..d71af17 100644 (file)
@@ -10,6 +10,7 @@ struct nvkm_acr_fwif {
        const struct nvkm_acr_func *func;
 };
 
+int gm200_acr_nofw(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
 int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
 int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
 
@@ -27,8 +28,10 @@ struct nvkm_acr_func {
        void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
        int (*init)(struct nvkm_acr *);
        void (*fini)(struct nvkm_acr *);
+       u64 bootstrap_falcons;
 };
 
+extern const struct nvkm_acr_func gm200_acr;
 int gm200_acr_wpr_parse(struct nvkm_acr *);
 u32 gm200_acr_wpr_layout(struct nvkm_acr *);
 int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
index d28d8f3..c4981bc 100644 (file)
@@ -219,6 +219,7 @@ tu102_acr_load(struct nvkm_acr *acr, int version,
 static const struct nvkm_acr_fwif
 tu102_acr_fwif[] = {
        {  0, tu102_acr_load, &tu102_acr },
+       { -1, gm200_acr_nofw, &gm200_acr },
        {}
 };
 
index a8d5d67..8698f26 100644 (file)
@@ -172,8 +172,8 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
                                        outp->dpconf.link_nr = 1;
                                        break;
                                }
+                               fallthrough;
 
-                               /* fall-through... */
                        case DCB_OUTPUT_TMDS:
                        case DCB_OUTPUT_LVDS:
                                outp->link = (conf & 0x00000030) >> 4;
index b099d12..c694501 100644 (file)
@@ -100,7 +100,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                switch (*ver) {
                case 0x20:
                        info->mask |= 0x00c0; /* match any link */
-                       /* fall-through */
+                       fallthrough;
                case 0x21:
                case 0x30:
                        info->flags     = nvbios_rd08(bios, data + 0x05);
index 7112992..f039388 100644 (file)
@@ -135,7 +135,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
                break;
        case 0x30:
                info->script   = nvbios_rd16(bios, perf + 0x02);
-               /* fall through */
+               fallthrough;
        case 0x35:
                info->fanspeed = nvbios_rd08(bios, perf + 0x06);
                info->voltage  = nvbios_rd08(bios, perf + 0x07);
index bda6cc9..350f10a 100644 (file)
@@ -134,7 +134,7 @@ pll_map(struct nvkm_bios *bios)
                    device->chipset == 0xaa ||
                    device->chipset == 0xac)
                        return g84_pll_mapping;
-               /* fall through */
+               fallthrough;
        default:
                return NULL;
        }
index 20ff517..2da45e2 100644 (file)
@@ -115,21 +115,21 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
                switch (min_t(u8, *hdr, 25)) {
                case 25:
                        p->timing_10_24  = nvbios_rd08(bios, data + 0x18);
-                       /* fall through */
+                       fallthrough;
                case 24:
                case 23:
                case 22:
                        p->timing_10_21  = nvbios_rd08(bios, data + 0x15);
-                       /* fall through */
+                       fallthrough;
                case 21:
                        p->timing_10_20  = nvbios_rd08(bios, data + 0x14);
-                       /* fall through */
+                       fallthrough;
                case 20:
                        p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
-                       /* fall through */
+                       fallthrough;
                case 19:
                        p->timing_10_18  = nvbios_rd08(bios, data + 0x12);
-                       /* fall through */
+                       fallthrough;
                case 18:
                case 17:
                        p->timing_10_16  = nvbios_rd08(bios, data + 0x10);
index 40e5645..dc184e8 100644 (file)
@@ -90,7 +90,7 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
                        case NVKM_CLK_BOOST_NONE:
                                if (clk->base_khz && freq > clk->base_khz)
                                        return false;
-                               /* fall through */
+                               fallthrough;
                        case NVKM_CLK_BOOST_BIOS:
                                if (clk->boost_khz && freq > clk->boost_khz)
                                        return false;
index 4f00023..efa5027 100644 (file)
@@ -363,7 +363,7 @@ mcp77_clk_prog(struct nvkm_clk *base)
        switch (clk->vsrc) {
        case nv_clk_src_cclk:
                mast |= 0x00400000;
-               /* fall through */
+               fallthrough;
        default:
                nvkm_wr32(device, 0x4600, clk->vdiv);
        }
index c3dae05..317ce9f 100644 (file)
@@ -119,11 +119,11 @@ powerctrl_1_shift(int chip_version, int reg)
 
        switch (reg) {
        case 0x680520:
-               shift += 4; /* fall through */
+               shift += 4; fallthrough;
        case 0x680508:
-               shift += 4; /* fall through */
+               shift += 4; fallthrough;
        case 0x680504:
-               shift += 4; /* fall through */
+               shift += 4; fallthrough;
        case 0x680500:
                shift += 4;
        }
@@ -245,11 +245,11 @@ setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
 
                switch (reg1) {
                case 0x680504:
-                       shift_c040 += 2; /* fall through */
+                       shift_c040 += 2; fallthrough;
                case 0x680500:
-                       shift_c040 += 2; /* fall through */
+                       shift_c040 += 2; fallthrough;
                case 0x680520:
-                       shift_c040 += 2; /* fall through */
+                       shift_c040 += 2; fallthrough;
                case 0x680508:
                        shift_c040 += 2;
                }
index 5f4c287..97b3a28 100644 (file)
@@ -131,13 +131,13 @@ nv40_ram_prog(struct nvkm_ram *base)
                nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
                nvkm_wr32(device, 0x004048, ram->coef);
                nvkm_wr32(device, 0x004030, ram->coef);
-               /* fall through */
+               fallthrough;
        case 0x43:
        case 0x49:
        case 0x4b:
                nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
                nvkm_wr32(device, 0x00403c, ram->coef);
-               /* fall through */
+               fallthrough;
        default:
                nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
                nvkm_wr32(device, 0x004024, ram->coef);
index c8ab1b5..db7769c 100644 (file)
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00e4e4 + base);
                        udelay(1);
index 7ef6089..edb6148 100644 (file)
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00d954 + base);
                        udelay(1);
index ff8629d..45c62f5 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "priv.h"
 
-void
+static void
 gp10b_mc_init(struct nvkm_mc *mc)
 {
        struct nvkm_device *device = mc->subdev.device;
index ee11cca..de91e9a 100644 (file)
@@ -61,7 +61,7 @@ nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
        kfree(pt);
 }
 
-struct nvkm_mmu_pt *
+static struct nvkm_mmu_pt *
 nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
 {
        struct nvkm_mmu_pt *pt;
index b21e82e..94081f3 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <nvif/class.h>
 
-const u8 *
+static const u8 *
 tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
 {
        static const u8
index 199f94e..710f3f8 100644 (file)
@@ -1030,7 +1030,7 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
        return 0;
 }
 
-int
+static int
 nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
              u32 pd_header, bool managed, u64 addr, u64 size,
              struct lock_class_key *key, const char *name,
@@ -1204,7 +1204,6 @@ nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
 /*TODO:
  * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
  *   with inside HMM, which would be a lot nicer for us to deal with.
- * - Multiple page sizes (particularly for huge page support).
  * - Support for systems without a 4KiB page size.
  */
 int
@@ -1220,8 +1219,8 @@ nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
        /* Only support mapping where the page size of the incoming page
         * array matches a page size available for direct mapping.
         */
-       while (page->shift && page->shift != shift &&
-              page->desc->func->pfn == NULL)
+       while (page->shift && (page->shift != shift ||
+              page->desc->func->pfn == NULL))
                page++;
 
        if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
index d3f8f91..a2b1795 100644 (file)
@@ -163,9 +163,6 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                  u32 pd_header, bool managed, u64 addr, u64 size,
                  struct lock_class_key *, const char *name,
                  struct nvkm_vmm **);
-int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
-                 u32 pd_header, bool managed, u64 addr, u64 size,
-                 struct lock_class_key *, const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
 struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
                                     u64 addr, u64 size);
index d862875..9539e6c 100644 (file)
@@ -258,12 +258,94 @@ gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
        VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
 }
 
+static void
+gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
+                       struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+       struct device *dev = vmm->mmu->subdev.device->dev;
+       dma_addr_t addr;
+
+       nvkm_kmap(pt->memory);
+       while (ptes--) {
+               u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+               u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+               u64 data   = (u64)datahi << 32 | datalo;
+
+               if ((data & (3ULL << 1)) != 0) {
+                       addr = (data >> 8) << 12;
+                       dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
+               }
+               ptei++;
+       }
+       nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
+                       struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+       bool dma = false;
+
+       nvkm_kmap(pt->memory);
+       while (ptes--) {
+               u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+               u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+               u64 data   = (u64)datahi << 32 | datalo;
+
+               if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+                       VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
+                       dma = true;
+               }
+               ptei++;
+       }
+       nvkm_done(pt->memory);
+       return dma;
+}
+
+static void
+gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+       struct device *dev = vmm->mmu->subdev.device->dev;
+       dma_addr_t addr;
+
+       nvkm_kmap(pt->memory);
+       while (ptes--) {
+               u64 data = 0;
+
+               if (!(*map->pfn & NVKM_VMM_PFN_W))
+                       data |= BIT_ULL(6); /* RO. */
+
+               if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+                       addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+                       addr = dma_map_page(dev, pfn_to_page(addr), 0,
+                                           1UL << 21, DMA_BIDIRECTIONAL);
+                       if (!WARN_ON(dma_mapping_error(dev, addr))) {
+                               data |= addr >> 4;
+                               data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+                               data |= BIT_ULL(3); /* VOL. */
+                               data |= BIT_ULL(0); /* VALID. */
+                       }
+               } else {
+                       data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+                       data |= BIT_ULL(0); /* VALID. */
+               }
+
+               VMM_WO064(pt, vmm, ptei++ * 16, data);
+               map->pfn++;
+       }
+       nvkm_done(pt->memory);
+}
+
 static const struct nvkm_vmm_desc_func
 gp100_vmm_desc_pd0 = {
        .unmap = gp100_vmm_pd0_unmap,
        .sparse = gp100_vmm_pd0_sparse,
        .pde = gp100_vmm_pd0_pde,
        .mem = gp100_vmm_pd0_mem,
+       .pfn = gp100_vmm_pd0_pfn,
+       .pfn_clear = gp100_vmm_pd0_pfn_clear,
+       .pfn_unmap = gp100_vmm_pd0_pfn_unmap,
 };
 
 static void
@@ -466,7 +548,6 @@ void
 gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
        u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
-       type = 0; /*XXX: need to confirm stuff works with depth enabled... */
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
        type |= 0x00000001; /* PAGE_ALL */
index be91cff..b1294d0 100644 (file)
@@ -28,9 +28,9 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
        struct nvkm_device *device = subdev->device;
-       u32 type = depth << 24; /*XXX: not confirmed */
+       u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
 
-       type = 0x00000001; /* PAGE_ALL */
+       type |= 0x00000001; /* PAGE_ALL */
        if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
                type |= 0x00000004; /* HUB_ONLY */
 
index 2a6150a..70e2c41 100644 (file)
@@ -159,7 +159,7 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
                break;
        case 0x0e: /* eDP, falls through to DPint */
                ctx.outp[1] |= 0x00010000;
-               /* fall through */
+               fallthrough;
        case 0x07: /* DP internal, wtf is this?? HP8670w */
                ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
                type = DCB_CONNECTOR_eDP;
index a76c2a7..eafc932 100644 (file)
@@ -9,7 +9,7 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
 nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gm200.o
 nvkm-y += nvkm/subdev/pmu/gm20b.o
-nvkm-y += nvkm/subdev/pmu/gp100.o
 nvkm-y += nvkm/subdev/pmu/gp102.o
 nvkm-y += nvkm/subdev/pmu/gp10b.o
 #include "priv.h"
 
 static const struct nvkm_pmu_func
-gp100_pmu = {
+gm200_pmu = {
        .flcn = &gt215_pmu_flcn,
        .enabled = gf100_pmu_enabled,
        .reset = gf100_pmu_reset,
 };
 
+
+int
+gm200_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+       nvkm_warn(&pmu->subdev, "firmware unavailable\n");
+       return 0;
+}
+
 static const struct nvkm_pmu_fwif
-gp100_pmu_fwif[] = {
-       { -1, gf100_pmu_nofw, &gp100_pmu },
+gm200_pmu_fwif[] = {
+       { -1, gm200_pmu_nofw, &gm200_pmu },
        {}
 };
 
 int
-gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-       return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
+       return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu);
 }
index 8257103..8f6ed53 100644 (file)
@@ -28,7 +28,7 @@
 #include <nvfw/pmu.h>
 
 static int
-gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
 {
        struct nv_pmu_acr_bootstrap_falcon_msg *msg =
                container_of(hdr, typeof(*msg), msg.hdr);
@@ -126,11 +126,14 @@ gm20b_pmu_acr = {
        .bld_write = gm20b_pmu_acr_bld_write,
        .bld_patch = gm20b_pmu_acr_bld_patch,
        .boot = gm20b_pmu_acr_boot,
+       .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
+                            BIT_ULL(NVKM_ACR_LSF_FECS) |
+                            BIT_ULL(NVKM_ACR_LSF_GPCCS),
        .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
 };
 
 static int
-gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
 {
        struct nv_pmu_acr_init_wpr_region_msg *msg =
                container_of(hdr, typeof(*msg), msg.hdr);
@@ -231,7 +234,8 @@ gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
 
 static const struct nvkm_pmu_fwif
 gm20b_pmu_fwif[] = {
-       { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+       {  0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+       { -1, gm200_pmu_nofw, &gm20b_pmu },
        {}
 };
 
index 262b8a3..3d8ce14 100644 (file)
@@ -46,7 +46,7 @@ gp102_pmu = {
 
 static const struct nvkm_pmu_fwif
 gp102_pmu_fwif[] = {
-       { -1, gf100_pmu_nofw, &gp102_pmu },
+       { -1, gm200_pmu_nofw, &gp102_pmu },
        {}
 };
 
index 5b81c73..9c237c4 100644 (file)
@@ -28,7 +28,7 @@
 
 static int
 gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
-                                           struct nv_falcon_msg *hdr)
+                                           struct nvfw_falcon_msg *hdr)
 {
        struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
                container_of(hdr, typeof(*msg), msg.hdr);
@@ -69,6 +69,9 @@ gp10b_pmu_acr = {
        .bld_write = gm20b_pmu_acr_bld_write,
        .bld_patch = gm20b_pmu_acr_bld_patch,
        .boot = gm20b_pmu_acr_boot,
+       .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
+                            BIT_ULL(NVKM_ACR_LSF_FECS) |
+                            BIT_ULL(NVKM_ACR_LSF_GPCCS),
        .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
        .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
 };
@@ -90,7 +93,8 @@ MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
 
 static const struct nvkm_pmu_fwif
 gp10b_pmu_fwif[] = {
-       { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+       {  0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+       { -1, gm200_pmu_nofw, &gp10b_pmu },
        {}
 };
 
index f470859..276b6d7 100644 (file)
@@ -59,6 +59,7 @@ struct nvkm_pmu_fwif {
 };
 
 int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
 int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
 
 int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
index 4caf401..c08097f 100644 (file)
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
        return -ENODEV;
 }
 
-void
+static void
 gt215_therm_init(struct nvkm_therm *therm)
 {
        struct nvkm_device *device = therm->subdev.device;
index 8849353..af6ea54 100644 (file)
@@ -2583,6 +2583,7 @@ static const struct panel_desc logicpd_type_28 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2746,6 +2747,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct display_timing nlt_nl192108ac18_02d_timing = {
index 1d8e07b..bf9dc45 100644 (file)
@@ -149,10 +149,9 @@ static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
        return -1;
 }
 
-static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+static void qxl_ttm_backend_unbind(struct ttm_tt *ttm)
 {
        /* Not implemented */
-       return -1;
 }
 
 static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
index 54af06d..004344d 100644 (file)
@@ -591,7 +591,7 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
        return 0;
 }
 
-static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 {
        struct radeon_ttm_tt *gtt = (void *)ttm;
 
@@ -599,8 +599,6 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 
        if (gtt->userptr)
                radeon_ttm_tt_unpin_userptr(ttm);
-
-       return 0;
 }
 
 static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
index 0919f1f..f65d148 100644 (file)
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
 config DRM_RCAR_LVDS
        tristate "R-Car DU LVDS Encoder Support"
        depends on DRM && DRM_BRIDGE && OF
+       select DRM_KMS_HELPER
        select DRM_PANEL
        select OF_FLATTREE
        select OF_OVERLAY
index ce07ddc..557cbe5 100644 (file)
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
        unsigned long reg;
 
-       if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
-                              reg & SUN4I_HDMI_HPD_HIGH,
-                              0, 500000)) {
+       reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+       if (reg & SUN4I_HDMI_HPD_HIGH) {
                cec_phys_addr_invalidate(hdmi->cec_adap);
                return connector_status_disconnected;
        }
index 9b308b5..9a0b324 100644 (file)
@@ -368,6 +368,12 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
        h_size = window->src.w * bpp;
        v_size = window->src.h;
 
+       if (window->reflect_x)
+               h_offset += (window->src.w - 1) * bpp;
+
+       if (window->reflect_y)
+               v_offset += window->src.h - 1;
+
        value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
        tegra_plane_writel(plane, value, DC_WIN_PRESCALED_SIZE);
 
@@ -404,9 +410,6 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
                tegra_plane_writel(plane, window->stride[0], DC_WIN_LINE_STRIDE);
        }
 
-       if (window->bottom_up)
-               v_offset += window->src.h - 1;
-
        tegra_plane_writel(plane, h_offset, DC_WINBUF_ADDR_H_OFFSET);
        tegra_plane_writel(plane, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
@@ -470,7 +473,10 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
                value |= COLOR_EXPAND;
        }
 
-       if (window->bottom_up)
+       if (window->reflect_x)
+               value |= H_DIRECTION;
+
+       if (window->reflect_y)
                value |= V_DIRECTION;
 
        if (tegra_plane_use_horizontal_filtering(plane, window)) {
@@ -601,7 +607,10 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
        struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
-       unsigned int rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y;
+       unsigned int supported_rotation = DRM_MODE_ROTATE_0 |
+                                         DRM_MODE_REFLECT_X |
+                                         DRM_MODE_REFLECT_Y;
+       unsigned int rotation = state->rotation;
        struct tegra_bo_tiling *tiling = &plane_state->tiling;
        struct tegra_plane *tegra = to_tegra_plane(plane);
        struct tegra_dc *dc = to_tegra_dc(state->crtc);
@@ -639,12 +648,26 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
-       rotation = drm_rotation_simplify(state->rotation, rotation);
+       /*
+        * Older userspace used custom BO flag in order to specify the Y
+        * reflection, while modern userspace uses the generic DRM rotation
+        * property in order to achieve the same result.  The legacy BO flag
+        * duplicates the DRM rotation property when both are set.
+        */
+       if (tegra_fb_is_bottom_up(state->fb))
+               rotation |= DRM_MODE_REFLECT_Y;
+
+       rotation = drm_rotation_simplify(rotation, supported_rotation);
+
+       if (rotation & DRM_MODE_REFLECT_X)
+               plane_state->reflect_x = true;
+       else
+               plane_state->reflect_x = false;
 
        if (rotation & DRM_MODE_REFLECT_Y)
-               plane_state->bottom_up = true;
+               plane_state->reflect_y = true;
        else
-               plane_state->bottom_up = false;
+               plane_state->reflect_y = false;
 
        /*
         * Tegra doesn't support different strides for U and V planes so we
@@ -706,7 +729,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
        window.dst.w = drm_rect_width(&plane->state->dst);
        window.dst.h = drm_rect_height(&plane->state->dst);
        window.bits_per_pixel = fb->format->cpp[0] * 8;
-       window.bottom_up = tegra_fb_is_bottom_up(fb) || state->bottom_up;
+       window.reflect_x = state->reflect_x;
+       window.reflect_y = state->reflect_y;
 
        /* copy from state */
        window.zpos = plane->state->normalized_zpos;
@@ -792,6 +816,8 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
        err = drm_plane_create_rotation_property(&plane->base,
                                                 DRM_MODE_ROTATE_0,
                                                 DRM_MODE_ROTATE_0 |
+                                                DRM_MODE_ROTATE_180 |
+                                                DRM_MODE_REFLECT_X |
                                                 DRM_MODE_REFLECT_Y);
        if (err < 0)
                dev_err(dc->dev, "failed to create rotation property: %d\n",
@@ -957,6 +983,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
        }
 
        drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+       drm_plane_create_zpos_immutable_property(&plane->base, 255);
 
        return &plane->base;
 }
@@ -1079,6 +1106,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
        err = drm_plane_create_rotation_property(&plane->base,
                                                 DRM_MODE_ROTATE_0,
                                                 DRM_MODE_ROTATE_0 |
+                                                DRM_MODE_ROTATE_180 |
+                                                DRM_MODE_REFLECT_X |
                                                 DRM_MODE_REFLECT_Y);
        if (err < 0)
                dev_err(dc->dev, "failed to create rotation property: %d\n",
@@ -2554,10 +2583,8 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return PTR_ERR(dc->regs);
 
        dc->irq = platform_get_irq(pdev, 0);
-       if (dc->irq < 0) {
-               dev_err(&pdev->dev, "failed to get IRQ\n");
+       if (dc->irq < 0)
                return -ENXIO;
-       }
 
        err = tegra_dc_rgb_probe(dc);
        if (err < 0 && err != -ENODEV) {
index 3d8ddcc..051d03d 100644 (file)
@@ -136,7 +136,8 @@ struct tegra_dc_window {
        unsigned int stride[2];
        unsigned long base[3];
        unsigned int zpos;
-       bool bottom_up;
+       bool reflect_x;
+       bool reflect_y;
 
        struct tegra_bo_tiling tiling;
        u32 format;
index 38beab9..3820e8d 100644 (file)
@@ -670,6 +670,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
 static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
 {
        u32 value;
+       int err;
 
        /*
         * XXX Is this still needed? The module reset is deasserted right
@@ -693,7 +694,11 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
                DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
        tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
 
-       return tegra_mipi_calibrate(dsi->mipi);
+       err = tegra_mipi_calibrate(dsi->mipi);
+       if (err < 0)
+               return err;
+
+       return tegra_mipi_wait(dsi->mipi);
 }
 
 static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
@@ -1618,7 +1623,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        if (IS_ERR(dsi->regs))
                return PTR_ERR(dsi->regs);
 
-       dsi->mipi = tegra_mipi_request(&pdev->dev);
+       dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
        if (IS_ERR(dsi->mipi))
                return PTR_ERR(dsi->mipi);
 
index 48363f7..1a0d3ba 100644 (file)
@@ -177,6 +177,7 @@ static const u32 gr2d_addr_regs[] = {
        GR2D_DSTC_BASE_ADDR,
        GR2D_SRCA_BASE_ADDR,
        GR2D_SRCB_BASE_ADDR,
+       GR2D_PATBASE_ADDR,
        GR2D_SRC_BASE_ADDR_SB,
        GR2D_DSTA_BASE_ADDR_SB,
        GR2D_DSTB_BASE_ADDR_SB,
index 2398486..9b7d66e 100644 (file)
@@ -14,6 +14,7 @@
 #define GR2D_DSTC_BASE_ADDR            0x2d
 #define GR2D_SRCA_BASE_ADDR            0x31
 #define GR2D_SRCB_BASE_ADDR            0x32
+#define GR2D_PATBASE_ADDR              0x47
 #define GR2D_SRC_BASE_ADDR_SB          0x48
 #define GR2D_DSTA_BASE_ADDR_SB         0x49
 #define GR2D_DSTB_BASE_ADDR_SB         0x4a
index c0a528b..b0b8154 100644 (file)
@@ -381,10 +381,12 @@ static int gr3d_remove(struct platform_device *pdev)
        }
 
        if (gr3d->clk_secondary) {
+               reset_control_assert(gr3d->rst_secondary);
                tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
                clk_disable_unprepare(gr3d->clk_secondary);
        }
 
+       reset_control_assert(gr3d->rst);
        tegra_powergate_power_off(TEGRA_POWERGATE_3D);
        clk_disable_unprepare(gr3d->clk);
 
index 8183e61..22a03f7 100644 (file)
@@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_enable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_enable(wgrp);
        }
 
        return 0;
@@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_disable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_disable(wgrp);
        }
 }
 
@@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
 
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
+
+       return err;
+
+unregister:
+       host1x_client_unregister(&hub->client);
+       pm_runtime_disable(&pdev->dev);
        return err;
 }
 
index 9ccfb56..4cd0461 100644 (file)
@@ -61,7 +61,8 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
        copy->tiling = state->tiling;
        copy->format = state->format;
        copy->swap = state->swap;
-       copy->bottom_up = state->bottom_up;
+       copy->reflect_x = state->reflect_x;
+       copy->reflect_y = state->reflect_y;
        copy->opaque = state->opaque;
 
        for (i = 0; i < 2; i++)
index a158a91..c691dd7 100644 (file)
@@ -46,7 +46,8 @@ struct tegra_plane_state {
        u32 format;
        u32 swap;
 
-       bool bottom_up;
+       bool reflect_x;
+       bool reflect_y;
 
        /* used for legacy blending support only */
        struct tegra_plane_legacy_blending_state blending[2];
index 7cbcf96..45b5258 100644 (file)
@@ -2946,7 +2946,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 {
        int err;
 
-       sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+       sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
        if (IS_ERR(sor->avdd_io_supply)) {
                dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
                        PTR_ERR(sor->avdd_io_supply));
@@ -2960,7 +2960,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
                return err;
        }
 
-       sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+       sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
        if (IS_ERR(sor->vdd_pll_supply)) {
                dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
                        PTR_ERR(sor->vdd_pll_supply));
index 6050dc8..38f1351 100644 (file)
@@ -82,17 +82,18 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
        return ret;
 }
 
-static int ttm_agp_unbind(struct ttm_tt *ttm)
+static void ttm_agp_unbind(struct ttm_tt *ttm)
 {
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
        if (agp_be->mem) {
-               if (agp_be->mem->is_bound)
-                       return agp_unbind_memory(agp_be->mem);
+               if (agp_be->mem->is_bound) {
+                       agp_unbind_memory(agp_be->mem);
+                       return;
+               }
                agp_free_memory(agp_be->mem);
                agp_be->mem = NULL;
        }
-       return 0;
 }
 
 static void ttm_agp_destroy(struct ttm_tt *ttm)
index 0768a05..f297fd5 100644 (file)
@@ -880,8 +880,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        if (!fence)
                return 0;
 
-       if (no_wait_gpu)
+       if (no_wait_gpu) {
+               dma_fence_put(fence);
                return -EBUSY;
+       }
 
        dma_resv_add_shared_fence(bo->base.resv, fence);
 
@@ -1836,7 +1838,7 @@ out:
 }
 EXPORT_SYMBOL(ttm_bo_swapout);
 
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+void ttm_bo_swapout_all(void)
 {
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
index 82b893d..d7a6537 100644 (file)
@@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
+                       dma_fence_put(moving);
                        return VM_FAULT_NOPAGE;
                default:
+                       dma_fence_put(moving);
                        return VM_FAULT_SIGBUS;
                }
 
index e25d409..9d1c717 100644 (file)
@@ -244,7 +244,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
        ttm_tt_init_fields(ttm, bo, page_flags);
 
        if (ttm_tt_alloc_page_directory(ttm)) {
-               ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
@@ -268,7 +267,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
-               ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
@@ -290,7 +288,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
-               ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
@@ -313,11 +310,8 @@ EXPORT_SYMBOL(ttm_dma_tt_fini);
 
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
-       int ret;
-
        if (ttm->state == tt_bound) {
-               ret = ttm->func->unbind(ttm);
-               BUG_ON(ret);
+               ttm->func->unbind(ttm);
                ttm->state = tt_unbound;
        }
 }
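
Note: the unbind() conversions from int to void in this series (qxl, radeon, the AGP backend, ttm_tt above and vmwgfx below) follow from the TTM backend unbind callback no longer being allowed to fail, which is also why the BUG_ON(ret) in ttm_tt_unbind() goes away. As a sketch for context only (not part of this diff), assuming the 5.9-era layout of struct ttm_backend_func:

struct ttm_backend_func {
        int  (*bind)(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
        void (*unbind)(struct ttm_tt *ttm);    /* previously returned int */
        void (*destroy)(struct ttm_tt *ttm);
};
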
index 4704283..fb39826 100644 (file)
@@ -1352,7 +1352,7 @@ static int vmw_pm_freeze(struct device *kdev)
        vmw_execbuf_release_pinned_bo(dev_priv);
        vmw_resource_evict_all(dev_priv);
        vmw_release_device_early(dev_priv);
-       ttm_bo_swapout_all(&dev_priv->bdev);
+       ttm_bo_swapout_all();
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);
        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
index 9ffa9c7..16b3856 100644 (file)
@@ -1069,10 +1069,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
        if (new_content_type != SAME_AS_DISPLAY) {
                struct vmw_surface_metadata metadata = {0};
 
-               metadata.base_size.width = hdisplay;
-               metadata.base_size.height = vdisplay;
-               metadata.base_size.depth = 1;
-
                /*
                 * If content buffer is a buffer object, then we have to
                 * construct surface info
@@ -1104,6 +1100,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                        metadata = new_vfbs->surface->metadata;
                }
 
+               metadata.base_size.width = hdisplay;
+               metadata.base_size.height = vdisplay;
+               metadata.base_size.depth = 1;
+
                if (vps->surf) {
                        struct drm_vmw_size cur_base_size =
                                vps->surf->metadata.base_size;
index 1d78187..ab524ab 100644 (file)
@@ -610,7 +610,7 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
        return 0;
 }
 
-static int vmw_ttm_unbind(struct ttm_tt *ttm)
+static void vmw_ttm_unbind(struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -628,8 +628,6 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
 
        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
-
-       return 0;
 }
 
 
index 6a995db..e201f62 100644 (file)
@@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
  */
 void host1x_driver_unregister(struct host1x_driver *driver)
 {
+       struct host1x *host1x;
+
        driver_unregister(&driver->driver);
 
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list)
+               host1x_detach_driver(host1x, driver);
+
+       mutex_unlock(&devices_lock);
+
        mutex_lock(&drivers_lock);
        list_del_init(&driver->list);
        mutex_unlock(&drivers_lock);
index c039267..1b4997b 100644 (file)
@@ -16,6 +16,8 @@
 #include "debug.h"
 #include "channel.h"
 
+static DEFINE_MUTEX(debug_lock);
+
 unsigned int host1x_debug_trace_cmdbuf;
 
 static pid_t host1x_debug_force_timeout_pid;
@@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
        struct output *o = data;
 
        mutex_lock(&ch->cdma.lock);
+       mutex_lock(&debug_lock);
 
        if (show_fifo)
                host1x_hw_show_channel_fifo(m, ch, o);
 
        host1x_hw_show_channel_cdma(m, ch, o);
 
+       mutex_unlock(&debug_lock);
        mutex_unlock(&ch->cdma.lock);
 
        return 0;
index d24344e..d0ebb70 100644 (file)
@@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev)
 
        err = host1x_register(host);
        if (err < 0)
-               goto deinit_intr;
+               goto deinit_debugfs;
+
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
 
        return 0;
 
-deinit_intr:
+unregister:
+       host1x_unregister(host);
+deinit_debugfs:
+       host1x_debug_deinit(host);
        host1x_intr_deinit(host);
 deinit_syncpt:
        host1x_syncpt_deinit(host);
index 0212584..f31bcfa 100644 (file)
@@ -192,8 +192,14 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
 
 static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
 {
+       struct push_buffer *pb = &cdma->push_buffer;
        struct host1x_job *job;
 
+       host1x_debug_output(o, "PUSHBUF at %pad, %u words\n",
+                           &pb->dma, pb->size / 4);
+
+       show_gather(o, pb->dma, pb->size / 4, cdma, pb->dma, pb->mapped);
+
        list_for_each_entry(job, &cdma->sync_queue, list) {
                unsigned int i;
 
index a10643a..89b6c14 100644 (file)
@@ -27,10 +27,13 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
                                    u32 num_cmdbufs, u32 num_relocs)
 {
        struct host1x_job *job = NULL;
-       unsigned int num_unpins = num_cmdbufs + num_relocs;
+       unsigned int num_unpins = num_relocs;
        u64 total;
        void *mem;
 
+       if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               num_unpins += num_cmdbufs;
+
        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
@@ -102,6 +105,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
+       struct host1x_job_gather *g;
        struct iommu_domain *domain;
        unsigned int i;
        int err;
@@ -183,8 +187,14 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                job->num_unpins++;
        }
 
+       /*
+        * We will copy gathers BO content later, so there is no need to
+        * hold and pin them.
+        */
+       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               return 0;
+
        for (i = 0; i < job->num_gathers; i++) {
-               struct host1x_job_gather *g = &job->gathers[i];
                size_t gather_size = 0;
                struct scatterlist *sg;
                struct sg_table *sgt;
@@ -194,6 +204,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                dma_addr_t *phys;
                unsigned int j;
 
+               g = &job->gathers[i];
                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
@@ -213,10 +224,10 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                sgt = host1x_bo_pin(host->dev, g->bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
-                       goto unpin;
+                       goto put;
                }
 
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+               if (host->domain) {
                        for_each_sg(sgt->sgl, sg, sgt->nents, j)
                                gather_size += sg->length;
                        gather_size = iova_align(&host->iova, gather_size);
@@ -226,7 +237,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                                           host->iova_end >> shift, true);
                        if (!alloc) {
                                err = -ENOMEM;
-                               goto unpin;
+                               goto put;
                        }
 
                        err = iommu_map_sg(host->domain,
@@ -235,7 +246,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                        if (err == 0) {
                                __free_iova(&host->iova, alloc);
                                err = -EINVAL;
-                               goto unpin;
+                               goto put;
                        }
 
                        job->unpins[job->num_unpins].size = gather_size;
@@ -245,7 +256,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
                                         DMA_TO_DEVICE);
                        if (!err) {
                                err = -ENOMEM;
-                               goto unpin;
+                               goto put;
                        }
 
                        job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
@@ -263,6 +274,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 
        return 0;
 
+put:
+       host1x_bo_put(g->bo);
 unpin:
        host1x_job_unpin(job);
        return err;
index e00809d..e606464 100644 (file)
@@ -21,9 +21,9 @@
  */
 
 #include <linux/clk.h>
-#include <linux/delay.h>
 #include <linux/host1x.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -206,9 +206,9 @@ static int tegra_mipi_power_down(struct tegra_mipi *mipi)
        return 0;
 }
 
-struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+                                            struct device_node *np)
 {
-       struct device_node *np = device->of_node;
        struct tegra_mipi_device *dev;
        struct of_phandle_args args;
        int err;
@@ -293,22 +293,29 @@ int tegra_mipi_disable(struct tegra_mipi_device *dev)
 }
 EXPORT_SYMBOL(tegra_mipi_disable);
 
-static int tegra_mipi_wait(struct tegra_mipi *mipi)
+int tegra_mipi_wait(struct tegra_mipi_device *device)
 {
-       unsigned long timeout = jiffies + msecs_to_jiffies(250);
+       struct tegra_mipi *mipi = device->mipi;
+       void __iomem *status_reg = mipi->regs + (MIPI_CAL_STATUS << 2);
        u32 value;
+       int err;
 
-       while (time_before(jiffies, timeout)) {
-               value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
-               if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
-                   (value & MIPI_CAL_STATUS_DONE) != 0)
-                       return 0;
+       err = clk_enable(device->mipi->clk);
+       if (err < 0)
+               return err;
 
-               usleep_range(10, 50);
-       }
+       mutex_lock(&device->mipi->lock);
 
-       return -ETIMEDOUT;
+       err = readl_relaxed_poll_timeout(status_reg, value,
+                                        !(value & MIPI_CAL_STATUS_ACTIVE) &&
+                                        (value & MIPI_CAL_STATUS_DONE), 50,
+                                        250000);
+       mutex_unlock(&device->mipi->lock);
+       clk_disable(device->mipi->clk);
+
+       return err;
 }
+EXPORT_SYMBOL(tegra_mipi_wait);
 
 int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 {
@@ -374,12 +381,10 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
        value |= MIPI_CAL_CTRL_START;
        tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
 
-       err = tegra_mipi_wait(device->mipi);
-
        mutex_unlock(&device->mipi->lock);
        clk_disable(device->mipi->clk);
 
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL(tegra_mipi_calibrate);
 
index 6f1fe72..a9c2de9 100644 (file)
@@ -25,6 +25,7 @@
 
 #define U1_MOUSE_REPORT_ID                     0x01 /* Mouse data ReportID */
 #define U1_ABSOLUTE_REPORT_ID          0x03 /* Absolute data ReportID */
+#define U1_ABSOLUTE_REPORT_ID_SECD  0x02 /* FW-PTP Absolute data ReportID */
 #define U1_FEATURE_REPORT_ID           0x05 /* Feature ReportID */
 #define U1_SP_ABSOLUTE_REPORT_ID       0x06 /* Feature ReportID */
 
@@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
        case U1_FEATURE_REPORT_ID:
                break;
        case U1_ABSOLUTE_REPORT_ID:
+       case U1_ABSOLUTE_REPORT_ID_SECD:
                for (i = 0; i < hdata->max_fingers; i++) {
                        u8 *contact = &data[i * 5];
 
index 359bdfb..e82f604 100644 (file)
@@ -60,6 +60,7 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
 struct apple_sc {
        unsigned long quirks;
        unsigned int fn_on;
+       unsigned int fn_found;
        DECLARE_BITMAP(pressed_numlock, KEY_CNT);
 };
 
@@ -365,12 +366,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
+       struct apple_sc *asc = hid_get_drvdata(hdev);
+
        if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
                        usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
                        usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
                /* The fn key on Apple USB keyboards */
                set_bit(EV_REP, hi->input->evbit);
                hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
+               asc->fn_found = true;
                apple_setup_input(hi->input);
                return 1;
        }
@@ -397,6 +401,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
        return 0;
 }
 
+static int apple_input_configured(struct hid_device *hdev,
+               struct hid_input *hidinput)
+{
+       struct apple_sc *asc = hid_get_drvdata(hdev);
+
+       if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+               hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
+               asc->quirks = 0;
+       }
+
+       return 0;
+}
+
 static int apple_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
@@ -611,6 +628,7 @@ static struct hid_driver apple_driver = {
        .event = apple_event,
        .input_mapping = apple_input_mapping,
        .input_mapped = apple_input_mapped,
+       .input_configured = apple_input_configured,
 };
 module_hid_driver(apple_driver);
 
index 874fc37..6f370e0 100644 (file)
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081    0xa081
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2    0xa0c2
 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293
 
 #define USB_VENDOR_ID_IMATION          0x0718
 #define USB_DEVICE_ID_DISC_STAKKA      0xd000
 #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO       0x3232
 #define USB_DEVICE_ID_ROCCAT_SAVU      0x2d5a
 
+#define USB_VENDOR_ID_SAI              0x17dd
+
 #define USB_VENDOR_ID_SAITEK           0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
index 48dff5d..a78c13c 100644 (file)
@@ -1153,7 +1153,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
        if (!dj_report)
                return -ENOMEM;
        dj_report->report_id = REPORT_ID_DJ_SHORT;
-       dj_report->device_index = 0xFF;
+       dj_report->device_index = HIDPP_RECEIVER_INDEX;
        dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
        retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
        kfree(dj_report);
@@ -1175,7 +1175,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
 
        if (djrcv_dev->type == recvr_type_dj) {
                dj_report->report_id = REPORT_ID_DJ_SHORT;
-               dj_report->device_index = 0xFF;
+               dj_report->device_index = HIDPP_RECEIVER_INDEX;
                dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
                dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
                dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] =
@@ -1204,7 +1204,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
        memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH);
 
        buf[0] = REPORT_ID_HIDPP_SHORT;
-       buf[1] = 0xFF;
+       buf[1] = HIDPP_RECEIVER_INDEX;
        buf[2] = 0x80;
        buf[3] = 0x00;
        buf[4] = 0x00;
index 1e1cf8e..b8b53dc 100644 (file)
@@ -3146,7 +3146,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
                multiplier = 1;
 
        hidpp->vertical_wheel_counter.wheel_multiplier = multiplier;
-       hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier);
+       hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier);
        return 0;
 }
 
index 3413866..abd8690 100644 (file)
@@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __set_bit(MSC_RAW, input->mscbit);
        }
 
+       /*
+        * hid-input may mark device as using autorepeat, but neither
+        * the trackpad, nor the mouse actually want it.
+        */
+       __clear_bit(EV_REP, input->evbit);
+
        return 0;
 }
 
index ca8b5c2..934fc0a 100644 (file)
@@ -88,6 +88,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
@@ -832,6 +833,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) },
 #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
index 6286204..a3b151b 100644 (file)
@@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam)
                        steam_battery_register(steam);
 
                mutex_lock(&steam_devices_lock);
-               list_add(&steam->list, &steam_devices);
+               if (list_empty(&steam->list))
+                       list_add(&steam->list, &steam_devices);
                mutex_unlock(&steam_devices_lock);
        }
 
@@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam)
                hid_info(steam->hdev, "Steam Controller '%s' disconnected",
                                steam->serial_no);
                mutex_lock(&steam_devices_lock);
-               list_del(&steam->list);
+               list_del_init(&steam->list);
                mutex_unlock(&steam_devices_lock);
                steam->serial_no[0] = 0;
        }
@@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev,
        mutex_init(&steam->mutex);
        steam->quirks = id->driver_data;
        INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+       INIT_LIST_HEAD(&steam->list);
 
        steam->client_hdev = steam_create_client_hid(hdev);
        if (IS_ERR(steam->client_hdev)) {
index ec142bc..35f3bfc 100644 (file)
@@ -374,6 +374,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                .driver_data = (void *)&sipodev_desc
        },
        {
+               .ident = "Mediacom FlexBook edge 13",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
+       {
                .ident = "Odys Winbook 13",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
index 9147ee9..d69f4ef 100644 (file)
@@ -1368,7 +1368,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
-       kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
+       kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
        if (bytes_written)
                hyperv_report_panic_msg(panic_pa, bytes_written);
index 0db8ef4..a270b97 100644 (file)
@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
        res = setup_attrs(resource);
        if (res)
-               goto exit_free;
+               goto exit_free_capability;
 
        resource->hwmon_dev = hwmon_device_register(&device->dev);
        if (IS_ERR(resource->hwmon_dev)) {
@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
 exit_remove:
        remove_attrs(resource);
+exit_free_capability:
+       free_capabilities(resource);
 exit_free:
        kfree(resource);
 exit:
index e95b742..2960374 100644 (file)
@@ -362,7 +362,7 @@ static struct platform_driver amd_energy_driver = {
 static struct platform_device *amd_energy_platdev;
 
 static const struct x86_cpu_id cpu_ids[] __initconst = {
-       X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+       X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
index 33fb548..3d8239f 100644 (file)
@@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev,
        ret = of_property_read_u32(child, "reg", &pwm_port);
        if (ret)
                return ret;
+       if (pwm_port >= ARRAY_SIZE(pwm_port_params))
+               return -EINVAL;
        aspeed_create_pwm_port(priv, (u8)pwm_port);
 
        ret = of_property_count_u8_elems(child, "cooling-levels");
index 1a9772f..94698ca 100644 (file)
@@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = {
  *     48380,
  * where T = [-48380, 147438] mC and N = [0, 1023].
  */
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
        .total_divider = 10000,
        .terms = {
                {4, 18322, 10000, 10000},
@@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = {
  * N = (18658e-3*V - 11572) / 10,
  * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
  */
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
        .total_divider = 10,
        .terms = {
                {1, 18658, 1000, 1},
@@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0644;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0444;
 }
@@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data)
 
 #define pvt_soft_isr NULL
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
index 0d4f3d9..72c7603 100644 (file)
@@ -285,6 +285,42 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
        return err;
 }
 
+static const char * const sct_avoid_models[] = {
+/*
+ * These drives will have WRITE FPDMA QUEUED command timeouts and sometimes just
+ * freeze until power-cycled under heavy write loads when their temperature is
+ * getting polled in SCT mode. The SMART mode seems to be fine, though.
+ *
+ * While only the 3 TB model (DT01ACA3) was actually caught exhibiting the
+ * problem let's play safe here to avoid data corruption and ban the whole
+ * DT01ACAx family.
+ *
+ * The models from this array are prefix-matched.
+ */
+       "TOSHIBA DT01ACA",
+};
+
+static bool drivetemp_sct_avoid(struct drivetemp_data *st)
+{
+       struct scsi_device *sdev = st->sdev;
+       unsigned int ctr;
+
+       if (!sdev->model)
+               return false;
+
+       /*
+        * The "model" field contains just the raw SCSI INQUIRY response
+        * "product identification" field, which has a width of 16 bytes.
+        * This field is space-filled, but is NOT NULL-terminated.
+        */
+       for (ctr = 0; ctr < ARRAY_SIZE(sct_avoid_models); ctr++)
+               if (!strncmp(sdev->model, sct_avoid_models[ctr],
+                            strlen(sct_avoid_models[ctr])))
+                       return true;
+
+       return false;
+}
+
 static int drivetemp_identify_sata(struct drivetemp_data *st)
 {
        struct scsi_device *sdev = st->sdev;
@@ -326,6 +362,13 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
        /* bail out if this is not a SATA device */
        if (!is_ata || !is_sata)
                return -ENODEV;
+
+       if (have_sct && drivetemp_sct_avoid(st)) {
+               dev_notice(&sdev->sdev_gendev,
+                          "will avoid using SCT for temperature monitoring\n");
+               have_sct = false;
+       }
+
        if (!have_sct)
                goto skip_sct;
 
index 491a570..924c02c 100644 (file)
@@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
        }
 
        result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
-       if (result) {
+       if (result < 0) {
                count = result;
                goto err;
        }
index 743752a..64122eb 100644 (file)
@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
  * Map device tree / platform data register bit map to chip bit map.
  * Applies to alert register and over-temperature register.
  */
-#define MAX6697_MAP_BITS(reg)  ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg)    ((((reg) & 0x7e) >> 1) | \
                                 (((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
 
 #define MAX6697_REG_STAT(n)            (0x44 + (n))
 
@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
-                                       MAX6697_MAP_BITS(pdata->alert_mask));
+                               MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
        if (ret < 0)
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
-                               MAX6697_MAP_BITS(pdata->over_temperature_mask));
+                       MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
        if (ret < 0)
                return ret;
 
index e7e1ddc..750b087 100644 (file)
@@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = {
        "Agent1 Dimm1",
        "BYTE_TEMP0",
        "BYTE_TEMP1",
-       "",
-       "",
+       "PECI Agent 0 Calibration",     /* undocumented */
+       "PECI Agent 1 Calibration",     /* undocumented */
        "",
        "Virtual_TEMP"
 };
 
-#define NCT6798_TEMP_MASK      0x8fff0ffe
+#define NCT6798_TEMP_MASK      0xbfff0ffe
 #define NCT6798_VIRT_TEMP_MASK 0x80000c00
 
 /* NCT6102D/NCT6106D specific data */
index a337195..ea516ce 100644 (file)
@@ -71,7 +71,7 @@ config SENSORS_IR35221
          Infineon IR35221 controller.
 
          This driver can also be built as a module. If so, the module will
-         be called ir35521.
+         be called ir35221.
 
 config SENSORS_IR38064
        tristate "Infineon IR38064"
index e25f541..1931757 100644 (file)
@@ -465,6 +465,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id);
 static int adm1275_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
        u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
        int config, device_config;
        int ret;
@@ -510,11 +511,16 @@ static int adm1275_probe(struct i2c_client *client,
                           "Device mismatch: Configured %s, detected %s\n",
                           id->name, mid->name);
 
-       config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+       if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
+           mid->driver_data == adm1293 || mid->driver_data == adm1294)
+               config_read_fn = i2c_smbus_read_word_data;
+       else
+               config_read_fn = i2c_smbus_read_byte_data;
+       config = config_read_fn(client, ADM1275_PMON_CONFIG);
        if (config < 0)
                return config;
 
-       device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
+       device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG);
        if (device_config < 0)
                return device_config;
 
index a420877..2191575 100644 (file)
@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
        struct pmbus_sensor *sensor;
 
        sensor = pmbus_add_sensor(data, "fan", "target", index, page,
-                                 PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+                                 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
                                  false, false, true);
 
        if (!sensor)
@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
                return 0;
 
        sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
-                                 PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
                                  false, false, true);
 
        if (!sensor)
                return -ENOMEM;
 
        sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
-                                 PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
                                  true, false, false);
 
        if (!sensor)
@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
                                continue;
 
                        if (pmbus_add_sensor(data, "fan", "input", index,
-                                            page, pmbus_fan_registers[f], 0xff,
+                                            page, 0xff, pmbus_fan_registers[f],
                                             PSC_FAN, true, true, true) == NULL)
                                return -ENOMEM;
 
index 286d3cf..d421e69 100644 (file)
@@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = {
        [ENERGY] = hwmon_energy,
 };
 
-static u32 hwmon_attributes[] = {
+static u32 hwmon_attributes[hwmon_max] = {
        [hwmon_chip] = HWMON_C_REGISTER_TZ,
        [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
        [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
index 40387d5..3ccc703 100644 (file)
@@ -747,17 +747,50 @@ static int cti_dying_cpu(unsigned int cpu)
        return 0;
 }
 
+static int cti_pm_setup(struct cti_drvdata *drvdata)
+{
+       int ret;
+
+       if (drvdata->ctidev.cpu == -1)
+               return 0;
+
+       if (nr_cti_cpu)
+               goto done;
+
+       cpus_read_lock();
+       ret = cpuhp_setup_state_nocalls_cpuslocked(
+                       CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+                       "arm/coresight_cti:starting",
+                       cti_starting_cpu, cti_dying_cpu);
+       if (ret) {
+               cpus_read_unlock();
+               return ret;
+       }
+
+       ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
+       cpus_read_unlock();
+       if (ret) {
+               cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
+               return ret;
+       }
+
+done:
+       nr_cti_cpu++;
+       cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
+
+       return 0;
+}
+
 /* release PM registrations */
 static void cti_pm_release(struct cti_drvdata *drvdata)
 {
-       if (drvdata->ctidev.cpu >= 0) {
-               if (--nr_cti_cpu == 0) {
-                       cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+       if (drvdata->ctidev.cpu == -1)
+               return;
 
-                       cpuhp_remove_state_nocalls(
-                               CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
-               }
-               cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
+       cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
+       if (--nr_cti_cpu == 0) {
+               cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+               cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
        }
 }
 
@@ -823,19 +856,14 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
 
        /* driver data*/
        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
-       if (!drvdata) {
-               ret = -ENOMEM;
-               dev_info(dev, "%s, mem err\n", __func__);
-               goto err_out;
-       }
+       if (!drvdata)
+               return -ENOMEM;
 
        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(base)) {
-               ret = PTR_ERR(base);
-               dev_err(dev, "%s, remap err\n", __func__);
-               goto err_out;
-       }
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
        drvdata->base = base;
 
        dev_set_drvdata(dev, drvdata);
@@ -854,8 +882,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
        pdata = coresight_cti_get_platform_data(dev);
        if (IS_ERR(pdata)) {
                dev_err(dev, "coresight_cti_get_platform_data err\n");
-               ret =  PTR_ERR(pdata);
-               goto err_out;
+               return  PTR_ERR(pdata);
        }
 
        /* default to powered - could change on PM notifications */
@@ -867,35 +894,20 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
                                               drvdata->ctidev.cpu);
        else
                cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev);
-       if (!cti_desc.name) {
-               ret = -ENOMEM;
-               goto err_out;
-       }
+       if (!cti_desc.name)
+               return -ENOMEM;
 
        /* setup CPU power management handling for CPU bound CTI devices. */
-       if (drvdata->ctidev.cpu >= 0) {
-               cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
-               if (!nr_cti_cpu++) {
-                       cpus_read_lock();
-                       ret = cpuhp_setup_state_nocalls_cpuslocked(
-                               CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
-                               "arm/coresight_cti:starting",
-                               cti_starting_cpu, cti_dying_cpu);
-
-                       if (!ret)
-                               ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
-                       cpus_read_unlock();
-                       if (ret)
-                               goto err_out;
-               }
-       }
+       ret = cti_pm_setup(drvdata);
+       if (ret)
+               return ret;
 
        /* create dynamic attributes for connections */
        ret = cti_create_cons_sysfs(dev, drvdata);
        if (ret) {
                dev_err(dev, "%s: create dynamic sysfs entries failed\n",
                        cti_desc.name);
-               goto err_out;
+               goto pm_release;
        }
 
        /* set up coresight component description */
@@ -908,7 +920,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
        drvdata->csdev = coresight_register(&cti_desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
-               goto err_out;
+               goto pm_release;
        }
 
        /* add to list of CTI devices */
@@ -927,7 +939,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
        dev_info(&drvdata->csdev->dev, "CTI initialized\n");
        return 0;
 
-err_out:
+pm_release:
        cti_pm_release(drvdata);
        return ret;
 }
index 747afc8..0c35cd5 100644 (file)
@@ -1388,18 +1388,57 @@ static struct notifier_block etm4_cpu_pm_nb = {
        .notifier_call = etm4_cpu_pm_notify,
 };
 
-static int etm4_cpu_pm_register(void)
+/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
+static int etm4_pm_setup_cpuslocked(void)
 {
-       if (IS_ENABLED(CONFIG_CPU_PM))
-               return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+       int ret;
 
-       return 0;
+       if (etm4_count++)
+               return 0;
+
+       ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+       if (ret)
+               goto reduce_count;
+
+       ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+                                                  "arm/coresight4:starting",
+                                                  etm4_starting_cpu, etm4_dying_cpu);
+
+       if (ret)
+               goto unregister_notifier;
+
+       ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+                                                  "arm/coresight4:online",
+                                                  etm4_online_cpu, NULL);
+
+       /* HP dyn state ID returned in ret on success */
+       if (ret > 0) {
+               hp_online = ret;
+               return 0;
+       }
+
+       /* failed dyn state - remove others */
+       cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
+
+unregister_notifier:
+       cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+
+reduce_count:
+       --etm4_count;
+       return ret;
 }
 
-static void etm4_cpu_pm_unregister(void)
+static void etm4_pm_clear(void)
 {
-       if (IS_ENABLED(CONFIG_CPU_PM))
-               cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+       if (--etm4_count != 0)
+               return;
+
+       cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+       cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+       if (hp_online) {
+               cpuhp_remove_state_nocalls(hp_online);
+               hp_online = 0;
+       }
 }
 
 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1453,24 +1492,15 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
                                etm4_init_arch_data,  drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");
 
-       if (!etm4_count++) {
-               cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
-                                                    "arm/coresight4:starting",
-                                                    etm4_starting_cpu, etm4_dying_cpu);
-               ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
-                                                          "arm/coresight4:online",
-                                                          etm4_online_cpu, NULL);
-               if (ret < 0)
-                       goto err_arch_supported;
-               hp_online = ret;
+       ret = etm4_pm_setup_cpuslocked();
+       cpus_read_unlock();
 
-               ret = etm4_cpu_pm_register();
-               if (ret)
-                       goto err_arch_supported;
+       /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
+       if (ret) {
+               etmdrvdata[drvdata->cpu] = NULL;
+               return ret;
        }
 
-       cpus_read_unlock();
-
        if (etm4_arch_supported(drvdata->arch) == false) {
                ret = -EINVAL;
                goto err_arch_supported;
@@ -1517,13 +1547,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 
 err_arch_supported:
        etmdrvdata[drvdata->cpu] = NULL;
-       if (--etm4_count == 0) {
-               etm4_cpu_pm_unregister();
-
-               cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
-               if (hp_online)
-                       cpuhp_remove_state_nocalls(hp_online);
-       }
+       etm4_pm_clear();
        return ret;
 }
 
index ca232ec..c9ac3dc 100644 (file)
@@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev,
 {
        struct intel_th_device *hub = to_intel_th_hub(thdev);
        struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+       int ret;
 
        /* In host mode, this is up to the external debugger, do nothing. */
        if (hub->host_mode)
                return 0;
 
-       if (!hubdrv->set_output)
-               return -ENOTSUPP;
+       /*
+        * hub is instantiated together with the source device that
+        * calls here, so guaranteed to be present.
+        */
+       hubdrv = to_intel_th_driver(hub->dev.driver);
+       if (!hubdrv || !try_module_get(hubdrv->driver.owner))
+               return -EINVAL;
+
+       if (!hubdrv->set_output) {
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       ret = hubdrv->set_output(hub, master);
 
-       return hubdrv->set_output(hub, master);
+out:
+       module_put(hubdrv->driver.owner);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(intel_th_set_output);
 
index 7ccac74..21fdf0b 100644 (file)
@@ -234,11 +234,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
        {
+               /* Tiger Lake PCH-H */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
                /* Jasper Lake PCH */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
        {
+               /* Jasper Lake CPU */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
                /* Elkhart Lake CPU */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -248,6 +258,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Emmitsburg PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 3a1f4e6..a1529f5 100644 (file)
@@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master,
 {
        struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
 
-       intel_th_set_output(to_intel_th_device(sth->dev), master);
-
-       return 0;
+       return intel_th_set_output(to_intel_th_device(sth->dev), master);
 }
 
 static int intel_th_sw_init(struct sth_device *sth)
index ef39c83..bae1dc0 100644 (file)
@@ -113,11 +113,18 @@ config I2C_STUB
 
 config I2C_SLAVE
        bool "I2C slave support"
+       help
+         This enables Linux to act as an I2C slave device. Note that your I2C
+         bus master driver also needs to support this functionality. Please
+         read Documentation/i2c/slave-interface.rst for further details.
 
 if I2C_SLAVE
 
 config I2C_SLAVE_EEPROM
        tristate "I2C eeprom slave driver"
+       help
+         This backend makes Linux behave like an I2C EEPROM. Please read
+         Documentation/i2c/slave-eeprom-backend.rst for further details.
 
 endif
 
index 7f10312..3889787 100644 (file)
@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
                        DEB2("BUS ERROR - SDA Stuck low\n");
                        pca_reset(adap);
                        goto out;
-               case 0x90: /* Bus error - SCL stuck low */
+               case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+               case 0x90: /* Bus error - SCL stuck low (PCA9564) */
                        DEB2("BUS ERROR - SCL Stuck low\n");
                        pca_reset(adap);
                        goto out;
index e3a8640..3c19aad 100644 (file)
@@ -286,10 +286,8 @@ int i2c_dw_acpi_configure(struct device *device)
 }
 EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
 
-void i2c_dw_acpi_adjust_bus_speed(struct device *device)
+static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
 {
-       struct dw_i2c_dev *dev = dev_get_drvdata(device);
-       struct i2c_timings *t = &dev->timings;
        u32 acpi_speed;
        int i;
 
@@ -300,9 +298,22 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
         */
        for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
                if (acpi_speed >= supported_speeds[i])
-                       break;
+                       return supported_speeds[i];
        }
-       acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
+
+       return 0;
+}
+
+#else  /* CONFIG_ACPI */
+
+static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
+
+#endif /* CONFIG_ACPI */
+
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+{
+       u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
+       struct i2c_timings *t = &dev->timings;
 
        /*
         * Find bus speed from the "clock-frequency" device property, ACPI
@@ -315,9 +326,7 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
        else
                t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed);
-
-#endif /* CONFIG_ACPI */
+EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
 
 u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
 {
index 556673a..eb5ef4d 100644 (file)
@@ -361,11 +361,10 @@ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0;
 #endif
 
 int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
 
 #if IS_ENABLED(CONFIG_ACPI)
 int i2c_dw_acpi_configure(struct device *device);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device);
 #else
 static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {}
 #endif
index 947c096..8522134 100644 (file)
@@ -240,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
                }
        }
 
-       i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+       i2c_dw_adjust_bus_speed(dev);
 
        if (has_acpi_companion(&pdev->dev))
                i2c_dw_acpi_configure(&pdev->dev);
index 0de4e30..a71bc58 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
@@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
        return ret;
 }
 
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+       {
+               .ident = "Qtechnology QT5222",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+               },
+       },
+       { } /* terminate list */
+};
+
 static int dw_i2c_plat_probe(struct platform_device *pdev)
 {
        struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -228,7 +240,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        else
                i2c_parse_fw_timings(&pdev->dev, t, false);
 
-       i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+       i2c_dw_adjust_bus_speed(dev);
 
        if (pdev->dev.of_node)
                dw_i2c_of_configure(pdev);
@@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 
        adap = &dev->adapter;
        adap->owner = THIS_MODULE;
-       adap->class = I2C_CLASS_DEPRECATED;
+       adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+                                       I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
        ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
        adap->dev.of_node = pdev->dev.of_node;
        adap->nr = -1;
index bb810de..73f1396 100644 (file)
@@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
        {0,}
 };
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
 
 static irqreturn_t pch_i2c_handler(int irq, void *pData);
 
index e0c2569..977d6f5 100644 (file)
@@ -98,7 +98,7 @@
 #define I2C_STAT_DAT_REQ       BIT(25)
 #define I2C_STAT_CMD_COMP      BIT(24)
 #define I2C_STAT_STOP_ERR      BIT(23)
-#define I2C_STAT_MAX_PORT      GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT      GENMASK(22, 16)
 #define I2C_STAT_ANY_INT       BIT(15)
 #define I2C_STAT_SCL_IN                BIT(11)
 #define I2C_STAT_SDA_IN                BIT(10)
index 2fd717d..71d7bae 100644 (file)
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
                if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
                        mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
                                              &datalen, 1);
-                       if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+                       if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
                                dev_err(priv->dev, "Incorrect smbus block read message len\n");
-                               return -E2BIG;
+                               return -EPROTO;
                        }
                } else {
                        datalen = priv->xfer.data_len;
index 56bb840..f5c9787 100644 (file)
@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                case I2C_SMBUS_BLOCK_DATA:
                case I2C_SMBUS_BLOCK_PROC_CALL:
+                       if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+                               dev_err(&adapter->dev,
+                                       "Invalid block size returned: %d\n",
+                                       msg[1].buf[0]);
+                               status = -EPROTO;
+                               goto cleanup;
+                       }
                        for (i = 0; i < msg[1].buf[0] + 1; i++)
                                data->block[i] = msg[1].buf[i];
                        break;
index 00e100f..813bca7 100644 (file)
@@ -1685,10 +1685,13 @@ static int mma8452_probe(struct i2c_client *client,
 
        ret = mma8452_set_freefall_mode(data, false);
        if (ret < 0)
-               goto buffer_cleanup;
+               goto unregister_device;
 
        return 0;
 
+unregister_device:
+       iio_device_unregister(indio_dev);
+
 buffer_cleanup:
        iio_triggered_buffer_cleanup(indio_dev);
 
index f47606e..b33fe6c 100644 (file)
@@ -329,7 +329,7 @@ static int ad7780_probe(struct spi_device *spi)
 
        ret = ad7780_init_gpios(&spi->dev, st);
        if (ret)
-               goto error_cleanup_buffer_and_trigger;
+               return ret;
 
        st->reg = devm_regulator_get(&spi->dev, "avdd");
        if (IS_ERR(st->reg))
index c24c8da..7af8f05 100644 (file)
@@ -332,12 +332,12 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
                if (cl->dev->of_node != cln)
                        continue;
 
-               if (!try_module_get(dev->driver->owner)) {
+               if (!try_module_get(cl->dev->driver->owner)) {
                        mutex_unlock(&registered_clients_lock);
                        return ERR_PTR(-ENODEV);
                }
 
-               get_device(dev);
+               get_device(cl->dev);
                cl->info = info;
                mutex_unlock(&registered_clients_lock);
                return cl;
index e9f87e4..a350762 100644 (file)
@@ -65,6 +65,7 @@ static const struct reg_field afe4403_reg_fields[] = {
  * @regulator: Pointer to the regulator for the IC
  * @trig: IIO trigger for this device
  * @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct data layout to push into IIO buffer.
  */
 struct afe4403_data {
        struct device *dev;
@@ -74,6 +75,8 @@ struct afe4403_data {
        struct regulator *regulator;
        struct iio_trigger *trig;
        int irq;
+       /* Ensure suitable alignment for timestamp */
+       s32 buffer[8] __aligned(8);
 };
 
 enum afe4403_chan_id {
@@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret, bit, i = 0;
-       s32 buffer[8];
        u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
        u8 rx[3];
 
@@ -326,7 +328,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
                if (ret)
                        goto err;
 
-               buffer[i++] = get_unaligned_be24(&rx[0]);
+               afe->buffer[i++] = get_unaligned_be24(&rx[0]);
        }
 
        /* Disable reading from the device */
@@ -335,7 +337,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
        if (ret)
                goto err;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+       iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+                                          pf->timestamp);
 err:
        iio_trigger_notify_done(indio_dev->trig);
 
index e728bbb..cebb1fd 100644 (file)
@@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = {
  * @regulator: Pointer to the regulator for the IC
  * @trig: IIO trigger for this device
  * @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct a scan to push to the iio buffer.
  */
 struct afe4404_data {
        struct device *dev;
@@ -91,6 +92,7 @@ struct afe4404_data {
        struct regulator *regulator;
        struct iio_trigger *trig;
        int irq;
+       s32 buffer[10] __aligned(8);
 };
 
 enum afe4404_chan_id {
@@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret, bit, i = 0;
-       s32 buffer[10];
 
        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
-                                 &buffer[i++]);
+                                 &afe->buffer[i++]);
                if (ret)
                        goto err;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+       iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+                                          pf->timestamp);
 err:
        iio_trigger_notify_done(indio_dev->trig);
 
index 7ecd2ff..665eb7e 100644 (file)
@@ -38,6 +38,11 @@ struct hdc100x_data {
 
        /* integration time of the sensor */
        int adc_int_us[2];
+       /* Ensure natural alignment of timestamp */
+       struct {
+               __be16 channels[2];
+               s64 ts __aligned(8);
+       } scan;
 };
 
 /* integration time in us */
@@ -322,7 +327,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
        struct i2c_client *client = data->client;
        int delay = data->adc_int_us[0] + data->adc_int_us[1];
        int ret;
-       s16 buf[8];  /* 2x s16 + padding + 8 byte timestamp */
 
        /* dual read starts at temp register */
        mutex_lock(&data->lock);
@@ -333,13 +337,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
        }
        usleep_range(delay, delay + 1000);
 
-       ret = i2c_master_recv(client, (u8 *)buf, 4);
+       ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4);
        if (ret < 0) {
                dev_err(&client->dev, "cannot read sensor data\n");
                goto err;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+       iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
                                           iio_get_time_ns(indio_dev));
 err:
        mutex_unlock(&data->lock);
index 7d6771f..b2eb5ab 100644 (file)
@@ -14,8 +14,6 @@
 
 #include <linux/iio/iio.h>
 
-#define HTS221_DATA_SIZE       2
-
 enum hts221_sensor_type {
        HTS221_SENSOR_H,
        HTS221_SENSOR_T,
@@ -39,6 +37,11 @@ struct hts221_hw {
 
        bool enabled;
        u8 odr;
+       /* Ensure natural alignment of timestamp */
+       struct {
+               __le16 channels[2];
+               s64 ts __aligned(8);
+       } scan;
 };
 
 extern const struct dev_pm_ops hts221_pm_ops;
index 9fb3f33..ba7d413 100644 (file)
@@ -160,7 +160,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = {
 
 static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
 {
-       u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
        struct iio_poll_func *pf = p;
        struct iio_dev *iio_dev = pf->indio_dev;
        struct hts221_hw *hw = iio_priv(iio_dev);
@@ -170,18 +169,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
        /* humidity data */
        ch = &iio_dev->channels[HTS221_SENSOR_H];
        err = regmap_bulk_read(hw->regmap, ch->address,
-                              buffer, HTS221_DATA_SIZE);
+                              &hw->scan.channels[0],
+                              sizeof(hw->scan.channels[0]));
        if (err < 0)
                goto out;
 
        /* temperature data */
        ch = &iio_dev->channels[HTS221_SENSOR_T];
        err = regmap_bulk_read(hw->regmap, ch->address,
-                              buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE);
+                              &hw->scan.channels[1],
+                              sizeof(hw->scan.channels[1]));
        if (err < 0)
                goto out;
 
-       iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+       iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
                                           iio_get_time_ns(iio_dev));
 
 out:
index 1527f01..3525333 100644 (file)
@@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = {
        [IIO_MOD_PM2P5] = "pm2p5",
        [IIO_MOD_PM4] = "pm4",
        [IIO_MOD_PM10] = "pm10",
+       [IIO_MOD_ETHANOL] = "ethanol",
+       [IIO_MOD_H2] = "h2",
 };
 
 /* relies on pairs of these shared then separate */
index 810fdfd..91c3935 100644 (file)
@@ -192,6 +192,11 @@ struct ak8974 {
        bool drdy_irq;
        struct completion drdy_complete;
        bool drdy_active_low;
+       /* Ensure timestamp is naturally aligned */
+       struct {
+               __le16 channels[3];
+               s64 ts __aligned(8);
+       } scan;
 };
 
 static const char ak8974_reg_avdd[] = "avdd";
@@ -657,7 +662,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
 {
        struct ak8974 *ak8974 = iio_priv(indio_dev);
        int ret;
-       __le16 hw_values[8]; /* Three axes + 64bit padding */
 
        pm_runtime_get_sync(&ak8974->i2c->dev);
        mutex_lock(&ak8974->lock);
@@ -667,13 +671,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
                dev_err(&ak8974->i2c->dev, "error triggering measure\n");
                goto out_unlock;
        }
-       ret = ak8974_getresult(ak8974, hw_values);
+       ret = ak8974_getresult(ak8974, ak8974->scan.channels);
        if (ret) {
                dev_err(&ak8974->i2c->dev, "error getting measures\n");
                goto out_unlock;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, hw_values,
+       iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan,
                                           iio_get_time_ns(indio_dev));
 
  out_unlock:
@@ -862,19 +866,21 @@ static int ak8974_probe(struct i2c_client *i2c,
        ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config);
        if (IS_ERR(ak8974->map)) {
                dev_err(&i2c->dev, "failed to allocate register map\n");
+               pm_runtime_put_noidle(&i2c->dev);
+               pm_runtime_disable(&i2c->dev);
                return PTR_ERR(ak8974->map);
        }
 
        ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
        if (ret) {
                dev_err(&i2c->dev, "could not power on\n");
-               goto power_off;
+               goto disable_pm;
        }
 
        ret = ak8974_detect(ak8974);
        if (ret) {
                dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n");
-               goto power_off;
+               goto disable_pm;
        }
 
        ret = ak8974_selftest(ak8974);
@@ -884,14 +890,9 @@ static int ak8974_probe(struct i2c_client *i2c,
        ret = ak8974_reset(ak8974);
        if (ret) {
                dev_err(&i2c->dev, "AK8974 reset failed\n");
-               goto power_off;
+               goto disable_pm;
        }
 
-       pm_runtime_set_autosuspend_delay(&i2c->dev,
-                                        AK8974_AUTOSUSPEND_DELAY);
-       pm_runtime_use_autosuspend(&i2c->dev);
-       pm_runtime_put(&i2c->dev);
-
        indio_dev->dev.parent = &i2c->dev;
        switch (ak8974->variant) {
        case AK8974_WHOAMI_VALUE_AMI306:
@@ -957,6 +958,11 @@ no_irq:
                goto cleanup_buffer;
        }
 
+       pm_runtime_set_autosuspend_delay(&i2c->dev,
+                                        AK8974_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&i2c->dev);
+       pm_runtime_put(&i2c->dev);
+
        return 0;
 
 cleanup_buffer:
@@ -965,7 +971,6 @@ disable_pm:
        pm_runtime_put_noidle(&i2c->dev);
        pm_runtime_disable(&i2c->dev);
        ak8974_set_power(ak8974, AK8974_PWR_OFF);
-power_off:
        regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
 
        return ret;
index 2f598ad..f5db9fa 100644 (file)
@@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct ms5611_state *st = iio_priv(indio_dev);
-       s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+       /* Ensure buffer elements are naturally aligned */
+       struct {
+               s32 channels[2];
+               s64 ts __aligned(8);
+       } scan;
        int ret;
 
        mutex_lock(&st->lock);
-       ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+       ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1],
+                                           &scan.channels[0]);
        mutex_unlock(&st->lock);
        if (ret < 0)
                goto err;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+       iio_push_to_buffers_with_timestamp(indio_dev, &scan,
                                           iio_get_time_ns(indio_dev));
 
 err:
index 37fe851..799a8dc 100644 (file)
@@ -665,8 +665,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev)
        int err;
 
        err = pm_runtime_get_sync(indio_dev->dev.parent);
-       if (err < 0)
+       if (err < 0) {
+               pm_runtime_put(indio_dev->dev.parent);
                return err;
+       }
 
        if (err > 0) {
                /*
index 9ce787e..0d13772 100644 (file)
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
 
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
                                 struct cm_work *work)
+       __releases(&cm_id_priv->lock)
 {
        bool immediate;
 
index 3d7cc9f..c30cf53 100644 (file)
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
 {
        struct rdma_id_private *id_priv, *id_priv_dev;
 
+       lockdep_assert_held(&lock);
+
        if (!bind_list)
                return ERR_PTR(-EINVAL);
 
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
                }
        }
 
+       mutex_lock(&lock);
        /*
         * Net namespace might be getting deleted while route lookup,
         * cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
        rcu_read_unlock();
+       mutex_unlock(&lock);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        struct net *net = id_priv->id.route.addr.dev_addr.net;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
 
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
        u64 sid, mask;
        __be16 port;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        port = htons(bind_list->port);
 
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
        struct rdma_bind_list *bind_list;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
        struct sockaddr  *saddr = cma_src_addr(id_priv);
        __be16 dport = cma_port(daddr);
 
+       lockdep_assert_held(&lock);
+
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
                struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
        unsigned int rover;
        struct net *net = id_priv->id.route.addr.dev_addr.net;
 
+       lockdep_assert_held(&lock);
+
        inet_get_local_port_range(net, &low, &high);
        remaining = (high - low) + 1;
        rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
        struct rdma_id_private *cur_id;
        struct sockaddr *addr, *cur_addr;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
        unsigned short snum;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        snum = ntohs(cma_port(cma_src_addr(id_priv)));
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
index 2257d7f..738d1fa 100644 (file)
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
        return ret;
 }
 
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
        struct ib_device *dev = counter->device;
        struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
        if (!port_counter->hstats)
                return;
 
+       rdma_counter_query_stats(counter);
+
        for (i = 0; i < counter->stats->num_counters; i++)
                port_counter->hstats->value[i] += counter->stats->value[i];
 }
index 186e0d6..a09f8e3 100644 (file)
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
        xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
        flush_workqueue(port_priv->wq);
-       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);
+       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
+                       kfree(mad_priv);
                        ret = -ENOMEM;
                        break;
                }
index 38de494..3027cd2 100644 (file)
@@ -470,40 +470,46 @@ static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                       struct uverbs_attr_bundle *attrs)
 {
-       const struct uverbs_obj_fd_type *fd_type =
-               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+       const struct uverbs_obj_fd_type *fd_type;
        int new_fd;
-       struct ib_uobject *uobj;
+       struct ib_uobject *uobj, *ret;
        struct file *filp;
 
+       uobj = alloc_uobj(attrs, obj);
+       if (IS_ERR(uobj))
+               return uobj;
+
+       fd_type =
+               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-                   fd_type->fops->release != &uverbs_async_event_release))
-               return ERR_PTR(-EINVAL);
+                   fd_type->fops->release != &uverbs_async_event_release)) {
+               ret = ERR_PTR(-EINVAL);
+               goto err_fd;
+       }
 
        new_fd = get_unused_fd_flags(O_CLOEXEC);
-       if (new_fd < 0)
-               return ERR_PTR(new_fd);
-
-       uobj = alloc_uobj(attrs, obj);
-       if (IS_ERR(uobj))
+       if (new_fd < 0) {
+               ret = ERR_PTR(new_fd);
                goto err_fd;
+       }
 
        /* Note that uverbs_uobject_fd_release() is called during abort */
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
-               uverbs_uobject_put(uobj);
-               uobj = ERR_CAST(filp);
-               goto err_fd;
+               ret = ERR_CAST(filp);
+               goto err_getfile;
        }
        uobj->object = filp;
 
        uobj->id = new_fd;
        return uobj;
 
-err_fd:
+err_getfile:
        put_unused_fd(new_fd);
-       return uobj;
+err_fd:
+       uverbs_uobject_put(uobj);
+       return ret;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
index a2ed09a..8c930bf 100644 (file)
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
        return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        void *data;
        struct ib_sa_mad *mad;
        int len;
+       unsigned long flags;
+       unsigned long delay;
+       gfp_t gfp_flag;
+       int ret;
+
+       INIT_LIST_HEAD(&query->list);
+       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
        mad = query->mad_buf->mad;
        len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
 
-       return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+       gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+               GFP_NOWAIT;
 
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-       unsigned long flags;
-       unsigned long delay;
-       int ret;
+       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
 
-       INIT_LIST_HEAD(&query->list);
-       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+       if (ret)
+               goto out;
 
-       /* Put the request on the list first.*/
-       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       /* Put the request on the list.*/
        delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
        query->timeout = delay + jiffies;
        list_add_tail(&query->list, &ib_nl_request_list);
        /* Start the timeout if this is the only request */
        if (ib_nl_request_list.next == &query->list)
                queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-       ret = ib_nl_send_msg(query, gfp_mask);
-       if (ret) {
-               ret = -EIO;
-               /* Remove the request */
-               spin_lock_irqsave(&ib_nl_request_lock, flags);
-               list_del(&query->list);
-               spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-       }
+out:
+       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
        return ret;
 }
index 08313f7..7dd0824 100644 (file)
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
        props->max_send_sge = dev_attr->max_sq_sge;
        props->max_recv_sge = dev_attr->max_rq_sge;
        props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+       props->max_pkeys = 1;
 
        if (udata && udata->outlen) {
                resp.max_sq_sge = dev_attr->max_sq_sge;
index 4633a0c..2ced236 100644 (file)
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
 static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
 static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
-
-       if (!try_module_get(THIS_MODULE))
-               return -ENODEV;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
index 5eed436..cb7ad12 100644 (file)
@@ -831,6 +831,29 @@ wq_error:
 }
 
 /**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+       int pidx;
+       struct hfi1_pportdata *ppd;
+
+       for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+               ppd = dd->pport + pidx;
+
+               if (ppd->hfi1_wq) {
+                       destroy_workqueue(ppd->hfi1_wq);
+                       ppd->hfi1_wq = NULL;
+               }
+               if (ppd->link_wq) {
+                       destroy_workqueue(ppd->link_wq);
+                       ppd->link_wq = NULL;
+               }
+       }
+}
+
+/**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.
  * @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);
-
-               if (ppd->hfi1_wq) {
-                       destroy_workqueue(ppd->hfi1_wq);
-                       ppd->hfi1_wq = NULL;
-               }
-               if (ppd->link_wq) {
-                       destroy_workqueue(ppd->link_wq);
-                       ppd->link_wq = NULL;
-               }
+               if (ppd->hfi1_wq)
+                       flush_workqueue(ppd->hfi1_wq);
+               if (ppd->link_wq)
+                       flush_workqueue(ppd->link_wq);
        }
        sdma_exit(dd);
 }
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
         * clear dma engines, etc.
         */
        shutdown_device(dd);
+       destroy_workqueues(dd);
 
        stop_timers(dd);
 
index 07847cb..d580aa1 100644 (file)
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
  * @wait_head: the wait queue
  *
  * This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
  * buffer) is run out.
  */
 static inline void iowait_queue(bool pkts_sent, struct iowait *w,
index 185c9b0..b8c9d0a 100644 (file)
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
  * @sde: sdma engine
  * @tx_list: tx request list
  * @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
  * @flow: tracks when list needs to be flushed for a flow change
  * @q_idx: ipoib Tx queue index
  * @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
        struct sdma_engine *sde;
        struct list_head tx_list;
        u64 sent_txreqs;
+       atomic_t stops;
+       atomic_t ring_full;
+       atomic_t no_desc;
        union hfi1_ipoib_flow flow;
        u8 q_idx;
        bool pkts_sent;
index 883cb9d..9df292b 100644 (file)
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
        return sent - completed;
 }
 
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 {
-       if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
-                                      atomic64_read(&txq->complete_txreqs)) >=
-           min_t(unsigned int, txq->priv->netdev->tx_queue_len,
-                 txq->tx_ring.max_items - 1)))
+       return hfi1_ipoib_txreqs(txq->sent_txreqs,
+                                atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_inc_return(&txq->stops) == 1)
                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }
 
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_dec_and_test(&txq->stops))
+               netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+       ++txq->sent_txreqs;
+       if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+           !atomic_xchg(&txq->ring_full, 1))
+               hfi1_ipoib_stop_txq(txq);
+}
+
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
        struct net_device *dev = txq->priv->netdev;
 
-       /* If the queue is already running just return */
-       if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
-               return;
-
        /* If shutting down just return as queue state is irrelevant */
        if (unlikely(dev->reg_state != NETREG_REGISTERED))
                return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
         * Use the minimum of the current tx_queue_len or the rings max txreqs
         * to protect against ring overflow.
         */
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs,
-                             atomic64_read(&txq->complete_txreqs))
-           < min_t(unsigned int, dev->tx_queue_len,
-                   txq->tx_ring.max_items) >> 1)
-               netif_wake_subqueue(dev, txq->q_idx);
+       if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+           atomic_xchg(&txq->ring_full, 0))
+               hfi1_ipoib_wake_txq(txq);
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
        if (unlikely(!tx))
                return ERR_PTR(-ENOMEM);
 
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        tx->priv = priv;
        tx->txq = txp->txq;
        tx->skb = skb;
+       INIT_LIST_HEAD(&tx->txreq.list);
 
        hfi1_ipoib_build_ib_tx_headers(tx, txp);
 
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        ret = hfi1_ipoib_submit_tx(txq, tx);
        if (likely(!ret)) {
+tx_ok:
                trace_sdma_output_ibhdr(tx->priv->dd,
                                        &tx->sdma_hdr.hdr,
                                        ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        txq->pkts_sent = false;
 
-       if (ret == -EBUSY) {
-               list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-               trace_sdma_output_ibhdr(tx->priv->dd,
-                                       &tx->sdma_hdr.hdr,
-                                       ib_is_sc5(txp->flow.sc5));
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
-
-       if (ret == -ECOMM) {
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
+       if (ret == -EBUSY || ret == -ECOMM)
+               goto tx_ok;
 
        sdma_txclean(priv->dd, &tx->txreq);
        dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
        struct ipoib_txreq *tx;
 
        /* Has the flow changed? */
-       if (txq->flow.as_int != txp->flow.as_int)
-               (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+       if (txq->flow.as_int != txp->flow.as_int) {
+               int ret;
+
+               ret = hfi1_ipoib_flush_tx_list(dev, txq);
+               if (unlikely(ret)) {
+                       if (ret == -EBUSY)
+                               ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                        return -EAGAIN;
                }
 
-               netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
-               if (list_empty(&txq->wait.list))
+               if (list_empty(&txreq->list))
+                       /* came from non-list submit */
+                       list_add_tail(&txreq->list, &txq->tx_list);
+               if (list_empty(&txq->wait.list)) {
+                       if (!atomic_xchg(&txq->no_desc, 1))
+                               hfi1_ipoib_stop_txq(txq);
                        iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+               }
 
                write_sequnlock(&sde->waitlock);
                return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
        struct net_device *dev = txq->priv->netdev;
 
        if (likely(dev->reg_state == NETREG_REGISTERED) &&
-           likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
            likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
-               netif_wake_subqueue(dev, txq->q_idx);
+               if (atomic_xchg(&txq->no_desc, 0))
+                       hfi1_ipoib_wake_txq(txq);
 }
 
 int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
                txq->sde = NULL;
                INIT_LIST_HEAD(&txq->tx_list);
                atomic64_set(&txq->complete_txreqs, 0);
+               atomic_set(&txq->stops, 0);
+               atomic_set(&txq->ring_full, 0);
+               atomic_set(&txq->no_desc, 0);
                txq->q_idx = i;
                txq->flow.tx_queue = 0xff;
                txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
                atomic64_inc(complete_txreqs);
        }
 
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+       if (hfi1_ipoib_used(txq))
                dd_dev_warn(txq->priv->dd,
                            "txq %d not empty found %llu requests\n",
                            txq->q_idx,
index 63688e8..6d263c9 100644 (file)
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
 {
        if (dd->dummy_netdev) {
                dd_dev_info(dd, "hfi1 netdev freed\n");
-               free_netdev(dd->dummy_netdev);
+               kfree(dd->dummy_netdev);
                dd->dummy_netdev = NULL;
        }
 }
index 0c2ae9f..be62284 100644 (file)
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
        /* Constraining 10KB packets to 8KB packets */
        if (mtu == (enum ib_mtu)OPA_MTU_10240)
-               mtu = OPA_MTU_8192;
+               mtu = (enum ib_mtu)OPA_MTU_8192;
        return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if (dd->flags & HFI1_SHUTDOWN)
+               return true;
 
        return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
                               priv->s_sde ?
index 243b4ba..facff13 100644 (file)
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if ((dd->flags & HFI1_SHUTDOWN))
+               return true;
 
        return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
                                   priv->s_sde ?
index bfa6e08..d2d526c 100644 (file)
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        tx->mr = NULL;
        tx->sde = priv->s_sde;
        tx->psc = priv->s_sendcontext;
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        /* Set the header type */
        tx->phdr.hdr.hdr_type = priv->hdr_type;
index a77fa67..479fa55 100644 (file)
@@ -898,13 +898,14 @@ struct hns_roce_hw {
        int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
        void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                        enum ib_mtu mtu);
-       int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
-                         unsigned long mtpt_idx);
+       int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                         struct hns_roce_mr *mr, unsigned long mtpt_idx);
        int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                                struct hns_roce_mr *mr, int flags, u32 pdn,
                                int mr_access_flags, u64 iova, u64 size,
                                void *mb_buf);
-       int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+       int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                              struct hns_roce_mr *mr);
        int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
        void (*write_cqc)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
index d02207c..cf39f56 100644 (file)
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
                   val);
 }
 
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+                                 struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v1_mpt_entry *mpt_entry;
index c597d72..dd01a51 100644 (file)
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_hw_reset_stat(handle);
+       hw_resetting = ops->get_cmdq_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);
 
        if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
        return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+                       struct hns_roce_v2_mpt_entry *mpt_entry,
                        struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
        return 0;
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+                                 void *mb_buf, struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
        if (mr->type == MR_TYPE_DMA)
                return 0;
 
-       ret = set_mtpt_pbl(mpt_entry, mr);
+       ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
 
        return ret;
 }
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                mr->iova = iova;
                mr->size = size;
 
-               ret = set_mtpt_pbl(mpt_entry, mr);
+               ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
        }
 
        return ret;
 }
 
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+                                      void *mb_buf, struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v2_mpt_entry *mpt_entry;
        dma_addr_t pbl_ba = 0;
index 4c0bbb1..0e71ebe 100644 (file)
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
        }
 
        if (mr->type != MR_TYPE_FRMR)
-               ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+               ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+                                            mtpt_idx);
        else
-               ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+               ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
        if (ret) {
                dev_err(dev, "Write mtpt fail!\n");
                goto err_page;
index 343a8b8..6f99ed0 100644 (file)
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                           mdev_port_num);
        if (err)
                goto out;
-       ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
        props->active_width     = IB_WIDTH_4X;
index 81bf6b9..e050ead 100644 (file)
@@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (!in)
                return -ENOMEM;
 
-       if (MLX5_CAP_GEN(mdev, ece_support))
+       if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
                MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
@@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        unsigned long flags;
        int err;
 
-       if (qp->ibqp.rwq_ind_tbl) {
+       if (qp->is_rss) {
                destroy_rss_raw_qp_tir(dev, qp);
                return;
        }
 
-       base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       base = (qp->type == IB_QPT_RAW_PACKET ||
                qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
-              &qp->raw_packet_qp.rq.base :
-              &qp->trans_qp.base;
+                      &qp->raw_packet_qp.rq.base :
+                      &qp->trans_qp.base;
 
        if (qp->state != IB_QPS_RESET) {
-               if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+               if (qp->type != IB_QPT_RAW_PACKET &&
                    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                        err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
                                                  NULL, &base->mqp, NULL);
@@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                     base->mqp.qpn);
        }
 
-       get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
-               &send_cq, &recv_cq);
+       get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+               &recv_cq);
 
        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        mlx5_ib_unlock_cqs(send_cq, recv_cq);
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
-       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       if (qp->type == IB_QPT_RAW_PACKET ||
            qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                destroy_raw_packet_qp(dev, qp);
        } else {
@@ -2668,6 +2668,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
                return (create_flags) ? -EINVAL : 0;
 
+       process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+                           mlx5_get_flow_namespace(dev->mdev,
+                                                   MLX5_FLOW_NAMESPACE_BYPASS),
+                           qp);
+       process_create_flag(dev, &create_flags,
+                           IB_QP_CREATE_INTEGRITY_EN,
+                           MLX5_CAP_GEN(mdev, sho), qp);
        process_create_flag(dev, &create_flags,
                            IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
                            MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2873,7 +2880,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 static int check_ucmd_data(struct mlx5_ib_dev *dev,
                           struct mlx5_create_qp_params *params)
 {
-       struct ib_qp_init_attr *attr = params->attr;
        struct ib_udata *udata = params->udata;
        size_t size, last;
        int ret;
@@ -2885,14 +2891,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
                 */
                last = sizeof(struct mlx5_ib_create_qp_rss);
        else
-               /* IB_QPT_RAW_PACKET doesn't have ECE data */
-               switch (attr->qp_type) {
-               case IB_QPT_RAW_PACKET:
-                       last = offsetof(struct mlx5_ib_create_qp, ece_options);
-                       break;
-               default:
-                       last = offsetof(struct mlx5_ib_create_qp, reserved);
-               }
+               last = offsetof(struct mlx5_ib_create_qp, reserved);
 
        if (udata->inlen <= last)
                return 0;
@@ -2907,7 +2906,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
        if (!ret)
                mlx5_ib_dbg(
                        dev,
-                       "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+                       "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
                        udata->inlen, params->ucmd_size, last, size);
        return ret ? 0 : -EINVAL;
 }
@@ -3002,10 +3001,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
        return &qp->ibqp;
 
 destroy_qp:
-       if (qp->type == MLX5_IB_QPT_DCT)
+       if (qp->type == MLX5_IB_QPT_DCT) {
                mlx5_ib_destroy_dct(qp);
-       else
+       } else {
+               /*
+                * The lines below are a temporary solution until QP allocation
+                * is moved under IB/core responsibility.
+                */
+               qp->ibqp.send_cq = attr->send_cq;
+               qp->ibqp.recv_cq = attr->recv_cq;
+               qp->ibqp.pd = pd;
                destroy_qp_common(dev, qp, udata);
+       }
+
        qp = NULL;
 free_qp:
        kfree(qp);
@@ -4162,8 +4170,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
                if (udata->outlen < min_resp_len)
                        return -EINVAL;
-               resp.response_length = min_resp_len;
-
                /*
                 * If we don't have enough space for the ECE options,
                 * simply indicate it with resp.response_length.
@@ -4384,8 +4390,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
                                MLX5_GET(ads, path, src_addr_index),
                                MLX5_GET(ads, path, hop_limit),
                                MLX5_GET(ads, path, tclass));
-               memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
-                      MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+               rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
        }
 }
 
index c19d91d..7c3968e 100644 (file)
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        int ece = 0;
 
        switch (opcode) {
+       case MLX5_CMD_OP_INIT2INIT_QP:
+               ece = MLX5_GET(init2init_qp_out, out, ece);
+               break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                ece = MLX5_GET(init2rtr_qp_out, out, ece);
                break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        case MLX5_CMD_OP_RTS2RTS_QP:
                ece = MLX5_GET(rts2rts_qp_out, out, ece);
                break;
+       case MLX5_CMD_OP_RST2INIT_QP:
+               ece = MLX5_GET(rst2init_qp_out, out, ece);
+               break;
        default:
                break;
        }
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                break;
        default:
                return -EINVAL;
index 792eecd..97fc7dd 100644 (file)
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
        if (params->cm_info) {
                event.ird = params->cm_info->ird;
                event.ord = params->cm_info->ord;
-               event.private_data_len = params->cm_info->private_data_len;
-               event.private_data = (void *)params->cm_info->private_data;
+               /* Only connect_request and reply have valid private data;
+                * for the rest of the events this may be left over from
+                * connection establishment. CONNECT_REQUEST is issued via
+                * qedr_iw_mpa_request.
+                */
+               if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+                       event.private_data_len =
+                               params->cm_info->private_data_len;
+                       event.private_data =
+                               (void *)params->cm_info->private_data;
+               }
        }
 
        if (ep->cm_id)
index 511b728..7db35dd 100644 (file)
@@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = (ERR_PTR(err));
-                       goto bail_driver_priv;
+                       goto bail_rq_rvt;
                }
 
                if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1314,11 @@ bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-       rvt_free_rq(&qp->r_rq);
        free_ud_wq_attr(qp);
 
+bail_rq_rvt:
+       rvt_free_rq(&qp->r_rq);
+
 bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);
 
index a0b8cc6..ed60c9e 100644 (file)
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
        static int dev_id = 1;
        int rv;
 
+       sdev->vendor_part_id = dev_id++;
+
        rv = ib_register_device(base_dev, name);
        if (rv) {
                pr_warn("siw: device registration error %d\n", rv);
                return rv;
        }
-       sdev->vendor_part_id = dev_id++;
 
        siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
 
index 6505202..7271d70 100644 (file)
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
                        break;
 
                bytes = min(bytes, len);
-               if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+               if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+                   bytes) {
                        copied += bytes;
                        offset += bytes;
                        len -= bytes;
index 3f9354b..6291fb5 100644 (file)
@@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
        u8 hover_info = packet[ETP_HOVER_INFO_OFFSET];
        bool contact_valid, hover_event;
 
+       pm_wakeup_event(&data->client->dev, 0);
+
        hover_event = hover_info & 0x40;
        for (i = 0; i < ETP_MAX_FINGERS; i++) {
                contact_valid = tp_info & (1U << (3 + i));
@@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
        u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1];
        int x, y;
 
+       pm_wakeup_event(&data->client->dev, 0);
+
        if (!data->tp_input) {
                dev_warn_once(&data->client->dev,
                              "received a trackpoint report while no trackpoint device has been created. Please report upstream.\n");
@@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
 static irqreturn_t elan_isr(int irq, void *dev_id)
 {
        struct elan_tp_data *data = dev_id;
-       struct device *dev = &data->client->dev;
        int error;
        u8 report[ETP_MAX_REPORT_LEN];
 
@@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
        if (error)
                goto out;
 
-       pm_wakeup_event(dev, 0);
-
        switch (report[ETP_REPORT_ID_OFFSET]) {
        case ETP_REPORT_ID:
                elan_report_absolute(data, report);
@@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
                elan_report_trackpoint(data, report);
                break;
        default:
-               dev_err(dev, "invalid report id data (%x)\n",
+               dev_err(&data->client->dev, "invalid report id data (%x)\n",
                        report[ETP_REPORT_ID_OFFSET]);
        }
 
index 758dae8..4b81b2d 100644 (file)
@@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0093", /* T480 */
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
+       "LEN0099", /* X1 Extreme 1st */
        "LEN009b", /* T580 */
        "LEN200f", /* T450s */
        "LEN2044", /* L470  */
index 7b08ff8..7d7f737 100644 (file)
@@ -426,6 +426,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                },
        },
        {
+               /* Lenovo XiaoXin Air 12 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+               },
+       },
+       {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
index 233cb10..5477a57 100644 (file)
@@ -1325,7 +1325,6 @@ static int elants_i2c_probe(struct i2c_client *client,
                             0, MT_TOOL_PALM, 0, 0);
        input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
-       input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
 
        touchscreen_parse_properties(ts->input, true, &ts->prop);
 
index b510f67..b0f308c 100644 (file)
@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
 
 config INTEL_IOMMU_SVM
        bool "Support for Shared Virtual Memory with Intel IOMMU"
-       depends on INTEL_IOMMU && X86
+       depends on INTEL_IOMMU && X86_64
        select PCI_PASID
        select PCI_PRI
        select MMU_NOTIFIER
@@ -305,6 +305,7 @@ config ROCKCHIP_IOMMU
 
 config SUN50I_IOMMU
        bool "Allwinner H6 IOMMU Support"
+       depends on HAS_DMA
        depends on ARCH_SUNXI || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
index f892992..5730971 100644 (file)
@@ -102,7 +102,7 @@ extern int __init add_special_device(u8 type, u8 id, u16 *devid,
 #ifdef CONFIG_DMI
 void amd_iommu_apply_ivrs_quirks(void);
 #else
-static void amd_iommu_apply_ivrs_quirks(void) { }
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
 #endif
 
 #endif
index 74cca17..2f22326 100644 (file)
@@ -3985,9 +3985,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
        if (!fn)
                return -ENOMEM;
        iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
-       irq_domain_free_fwnode(fn);
-       if (!iommu->ir_domain)
+       if (!iommu->ir_domain) {
+               irq_domain_free_fwnode(fn);
                return -ENOMEM;
+       }
 
        iommu->ir_domain->parent = arch_get_ir_parent_domain();
        iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
index cf01d02..be43180 100644 (file)
@@ -12,7 +12,7 @@ struct qcom_smmu {
        struct arm_smmu_device smmu;
 };
 
-static const struct of_device_id qcom_smmu_client_of_match[] = {
+static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,mdp4" },
        { .compatible = "qcom,mdss" },
index 3c0c67a..8919c1c 100644 (file)
@@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void)
                                0, IOAPIC_REMAPPING_ENTRY, fn,
                                &hyperv_ir_domain_ops, NULL);
 
-       irq_domain_free_fwnode(fn);
+       if (!ioapic_ir_domain) {
+               irq_domain_free_fwnode(fn);
+               return -ENOMEM;
+       }
 
        /*
         * Hyper-V doesn't provide irq remapping function for
index cc46dff..683b812 100644 (file)
@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
        if (!ret)
                ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                           &validate_drhd_cb);
-       if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+       if (!ret && !no_iommu && !iommu_detected &&
+           (!dmar_disabled || dmar_platform_optin())) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
index 9129663..d759e72 100644 (file)
@@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
        return g_iommus[iommu_id];
 }
 
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+       return sm_supported(iommu) ?
+                       ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
        struct dmar_drhd_unit *drhd;
@@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
        for_each_domain_iommu(i, domain) {
                found = true;
-               if (!ecap_coherent(g_iommus[i]->ecap)) {
+               if (!iommu_paging_structure_coherency(g_iommus[i])) {
                        domain->iommu_coherency = 0;
                        break;
                }
@@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
-               if (!ecap_coherent(iommu->ecap)) {
+               if (!iommu_paging_structure_coherency(iommu)) {
                        domain->iommu_coherency = 0;
                        break;
                }
@@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (domain_use_first_level(domain))
-                               pteval |= DMA_FL_PTE_XD;
+                               pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
@@ -1951,7 +1957,6 @@ static inline void
 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
 {
        context->hi |= pasid & ((1 << 20) - 1);
-       context->hi |= (1 << 20);
 }
 
 /*
@@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
        context_set_fault_enable(context);
        context_set_present(context);
-       domain_flush_cache(domain, context, sizeof(*context));
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(context, sizeof(*context));
 
        /*
         * It's a non-present to present mapping. If hardware doesn't cache
@@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
        if (domain_use_first_level(domain))
-               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
        if (!sg) {
                sg_res = nr_pages;
@@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw)
                                    end >> agaw_to_width(si_domain->agaw)))
                                continue;
 
-                       ret = iommu_domain_identity_map(si_domain, start, end);
+                       ret = iommu_domain_identity_map(si_domain,
+                                       mm_to_dma_pfn(start >> PAGE_SHIFT),
+                                       mm_to_dma_pfn(end >> PAGE_SHIFT));
                        if (ret)
                                return ret;
                }
@@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
        return ret;
 }
 
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+       if (pdev->untrusted) {
+               pci_info(pdev,
+                        "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+                        pdev->vendor, pdev->device);
+               pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+               return true;
+       }
+       return false;
+}
+
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
@@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = {
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
 {
+       if (risky_device(dev))
+               return;
+
        pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
@@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
+       if (risky_device(dev))
+               return;
+
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
@@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 {
        unsigned short ggc;
 
+       if (risky_device(dev))
+               return;
+
        if (pci_read_config_word(dev, GGC, &ggc))
                return;
 
@@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void)
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
+
+       if (risky_device(pdev)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
        pci_dev_put(pdev);
 
        /* System Management Registers. Might be hidden, in which case
@@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void)
        if (!pdev)
                return;
 
+       if (risky_device(pdev)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
index 7f87698..9564d23 100644 (file)
@@ -563,8 +563,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
                                            0, INTR_REMAP_TABLE_ENTRIES,
                                            fn, &intel_ir_domain_ops,
                                            iommu);
-       irq_domain_free_fwnode(fn);
        if (!iommu->ir_domain) {
+               irq_domain_free_fwnode(fn);
                pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
                goto out_free_bitmap;
        }
index d43120e..b6858ad 100644 (file)
@@ -295,10 +295,10 @@ void iommu_release_device(struct device *dev)
                return;
 
        iommu_device_unlink(dev->iommu->iommu_dev, dev);
-       iommu_group_remove_device(dev);
 
        ops->release_device(dev);
 
+       iommu_group_remove_device(dev);
        module_put(ops->owner);
        dev_iommu_free(dev);
 }
index fce605e..3b1bf2f 100644 (file)
@@ -313,9 +313,9 @@ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
                    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(0));
 
-       ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
-                                reg, !reg,
-                                1, 2000);
+       ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
+                                       reg, !reg,
+                                       1, 2000);
        if (ret)
                dev_warn(iommu->dev, "TLB Flush timed out!\n");
 
@@ -556,7 +556,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
 {
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
-       dma_addr_t pte_dma;
        u32 *pte_addr;
        u32 dte;
 
@@ -566,7 +565,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
 
        pt_phys = sun50i_dte_get_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
-       pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
 
        if (!sun50i_pte_is_page_valid(*pte_addr))
                return 0;
index 29fead2..216b3b8 100644 (file)
@@ -563,7 +563,7 @@ config LOONGSON_PCH_PIC
          Support for the Loongson PCH PIC Controller.
 
 config LOONGSON_PCH_MSI
-       bool "Loongson PCH PIC Controller"
+       bool "Loongson PCH MSI Controller"
        depends on MACH_LOONGSON64 || COMPILE_TEST
        depends on PCI
        default MACH_LOONGSON64
index cd685f5..beac4ca 100644 (file)
@@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void)
        if (!gic_rdists->has_vpend_valid_dirty)
                return;
 
-       WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
-                                               val,
-                                               !(val & GICR_VPENDBASER_Dirty),
-                                               10, 500));
+       WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+                                                      val,
+                                                      !(val & GICR_VPENDBASER_Dirty),
+                                                      10, 500));
 }
 
 static void its_vpe_schedule(struct its_vpe *vpe)
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
        u64 val;
 
        if (info->req_db) {
+               unsigned long flags;
+
                /*
                 * vPE is going to block: make the vPE non-resident with
                 * PendingLast clear and DB set. The GIC guarantees that if
                 * we read-back PendingLast clear, then a doorbell will be
                 * delivered when an interrupt comes.
+                *
+                * Note the locking to deal with updates of pending_last from
+                * the doorbell interrupt handler, which can run concurrently.
                 */
+               raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
                val = its_clear_vpend_valid(vlpi_base,
                                            GICR_VPENDBASER_PendingLast,
                                            GICR_VPENDBASER_4_1_DB);
                vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+               raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
        } else {
                /*
                 * We're not blocking, so just make the vPE non-resident
index 00de05a..c17fabd 100644 (file)
@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
-       u32 val, mask, bit;
-       unsigned long flags;
+       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+       unsigned int cpu;
 
        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       gic_lock_irqsave(flags);
-       mask = 0xff << shift;
-       bit = gic_cpu_map[cpu] << shift;
-       val = readl_relaxed(reg) & ~mask;
-       writel_relaxed(val | bit, reg);
-       gic_unlock_irqrestore(flags);
-
+       writeb_relaxed(gic_cpu_map[cpu], reg);
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
index a6f97fa..8017f6d 100644 (file)
@@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node,
 
        hartid = riscv_of_parent_hartid(node);
        if (hartid < 0) {
-               pr_warn("unable to fine hart id for %pOF\n", node);
+               pr_warn("unable to find hart id for %pOF\n", node);
                return 0;
        }
 
index ac83f50..489935d 100644 (file)
@@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table,
        /*
         * Check we have enough space.
         */
-       needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+       needed = struct_size(deps, dev, count);
        if (len < needed) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                return;
index f60c025..85e0daa 100644 (file)
@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md)
 {
-       /* nudge anyone waiting on suspend queue */
-       if (unlikely(wq_has_sleeper(&md->wait)))
-               wake_up(&md->wait);
-
        /*
         * dm_put() must be at the end of this function. See the comment above
         */
index 74f3c50..5358894 100644 (file)
@@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                        while (daa-- && i < p) {
                                pages[i++] = pfn_t_to_page(pfn);
                                pfn.val++;
+                               if (!(i & 15))
+                                       cond_resched();
                        }
                } while (i < p);
                wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
@@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
 
                if (likely(!e->write_in_progress)) {
                        if (!discarded_something) {
-                               writecache_wait_for_ios(wc, READ);
-                               writecache_wait_for_ios(wc, WRITE);
+                               if (!WC_MODE_PMEM(wc)) {
+                                       writecache_wait_for_ios(wc, READ);
+                                       writecache_wait_for_ios(wc, WRITE);
+                               }
                                discarded_something = true;
                        }
+                       if (!writecache_entry_is_committed(wc, e))
+                               wc->uncommitted_blocks--;
                        writecache_free_entry(wc, e);
                }
 
@@ -2260,6 +2266,12 @@ invalid_optional:
        }
 
        if (WC_MODE_PMEM(wc)) {
+               if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+                       r = -EOPNOTSUPP;
+                       ti->error = "Asynchronous persistent memory not supported as pmem cache";
+                       goto bad;
+               }
+
                r = persistent_memory_claim(wc);
                if (r) {
                        ti->error = "Unable to map persistent memory for cache";
index 130b5a6..b298fef 100644 (file)
@@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
        nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
                >> zmd->zone_nr_blocks_shift;
        if (!nr_meta_zones ||
-           nr_meta_zones >= zmd->nr_rnd_zones) {
+           (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
+           (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
                dmz_dev_err(dev, "Invalid number of metadata blocks");
                return -ENXIO;
        }
@@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
                                                    unsigned int idx, bool idle)
 {
        struct dm_zone *dzone = NULL;
-       struct dm_zone *zone, *last = NULL;
+       struct dm_zone *zone, *maxw_z = NULL;
        struct list_head *zone_list;
 
        /* If we have cache zones select from the cache zone list */
@@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
        } else
                zone_list = &zmd->dev[idx].map_rnd_list;
 
+       /*
+        * Find the buffer zone with the heaviest weight or the first (oldest)
+        * data zone that can be reclaimed.
+        */
        list_for_each_entry(zone, zone_list, link) {
                if (dmz_is_buf(zone)) {
                        dzone = zone->bzone;
-                       if (dzone->dev->dev_idx != idx)
-                               continue;
-                       if (!last) {
-                               last = dzone;
+                       if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
                                continue;
-                       }
-                       if (last->weight < dzone->weight)
+                       if (!maxw_z || maxw_z->weight < dzone->weight)
+                               maxw_z = dzone;
+               } else {
+                       dzone = zone;
+                       if (dmz_lock_zone_reclaim(dzone))
+                               return dzone;
+               }
+       }
+
+       if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
+               return maxw_z;
+
+       /*
+        * If we get here, none of the zones inspected could be locked for
+        * reclaim. Try again, more aggressively: find the first zone that
+        * can be reclaimed regardless of its weight.
+        */
+       list_for_each_entry(zone, zone_list, link) {
+               if (dmz_is_buf(zone)) {
+                       dzone = zone->bzone;
+                       if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
                                continue;
-                       dzone = last;
                } else
                        dzone = zone;
                if (dmz_lock_zone_reclaim(dzone))
@@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
                                         unsigned int dev_idx, bool idle)
 {
-       struct dm_zone *zone;
+       struct dm_zone *zone = NULL;
 
        /*
         * Search for a zone candidate to reclaim: 2 cases are possible.
@@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
        dmz_lock_map(zmd);
        if (list_empty(&zmd->reserved_seq_zones_list))
                zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
-       else
+       if (!zone)
                zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
        dmz_unlock_map(zmd);
 
@@ -2197,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
 {
        struct list_head *list;
        struct dm_zone *zone;
-       int i = 0;
+       int i;
+
+       /* Schedule reclaim to ensure free zones are available */
+       if (!(flags & DMZ_ALLOC_RECLAIM)) {
+               for (i = 0; i < zmd->nr_devs; i++)
+                       dmz_schedule_reclaim(zmd->dev[i].reclaim);
+       }
 
+       i = 0;
 again:
        if (flags & DMZ_ALLOC_CACHE)
                list = &zmd->unmap_cache_list;
index 2261b4d..9c0ecc9 100644 (file)
@@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                        dmz_metadata_label(zmd), zrc->dev_idx);
                return -EBUSY;
        }
+       rzone = dzone;
 
        start = jiffies;
        if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
@@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                         */
                        ret = dmz_reclaim_rnd_data(zrc, dzone);
                }
-               rzone = dzone;
-
        } else {
                struct dm_zone *bzone = dzone->bzone;
                sector_t chunk_block = 0;
@@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                         * be later reclaimed.
                         */
                        ret = dmz_reclaim_seq_data(zrc, dzone);
-                       rzone = dzone;
                }
        }
 out:
@@ -458,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
                nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
                nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
        }
+       if (nr_unmap <= 1)
+               return 0;
        return nr_unmap * 100 / nr_zones;
 }
 
@@ -503,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
        struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
        struct dmz_metadata *zmd = zrc->metadata;
-       unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+       unsigned int p_unmap;
        int ret;
 
        if (dmz_dev_is_dying(zmd))
@@ -529,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
                zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
        }
 
-       nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
-       nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
        DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
                dmz_metadata_label(zmd), zrc->dev_idx,
                zrc->kc_throttle.throttle,
index a907a94..42aa513 100644 (file)
@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;
-       int i, ret;
-
-       /*
-        * Write may trigger a zone allocation. So make sure the
-        * allocation can succeed.
-        */
-       if (bio_op(bio) == REQ_OP_WRITE)
-               for (i = 0; i < dmz->nr_ddevs; i++)
-                       dmz_schedule_reclaim(dmz->dev[i].reclaim);
+       int ret;
 
        dmz_lock_metadata(zmd);
 
@@ -890,7 +882,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        /* Set target (no write same support) */
-       ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
+       ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
index 109e81f..52449af 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -654,28 +655,6 @@ static void free_tio(struct dm_target_io *tio)
        bio_put(&tio->clone);
 }
 
-static bool md_in_flight_bios(struct mapped_device *md)
-{
-       int cpu;
-       struct hd_struct *part = &dm_disk(md)->part0;
-       long sum = 0;
-
-       for_each_possible_cpu(cpu) {
-               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
-               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
-       }
-
-       return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
-       if (queue_is_mq(md->queue))
-               return blk_mq_queue_inflight(md->queue);
-       else
-               return md_in_flight_bios(md);
-}
-
 u64 dm_start_time_ns_from_clone(struct bio *bio)
 {
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
@@ -1009,6 +988,7 @@ static void clone_endio(struct bio *bio)
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;
+       struct bio *orig_bio = io->orig_bio;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
@@ -1022,6 +1002,18 @@ static void clone_endio(struct bio *bio)
                        disable_write_zeroes(md);
        }
 
+       /*
+        * For zone-append bios, get the offset in the zone of the written
+        * sector and add that to the original bio sector position.
+        */
+       if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
+               sector_t written_sector = bio->bi_iter.bi_sector;
+               struct request_queue *q = orig_bio->bi_disk->queue;
+               u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
+
+               orig_bio->bi_iter.bi_sector += written_sector & mask;
+       }
+
        if (endio) {
                int r = endio(tio->ti, bio, &error);
                switch (r) {
@@ -1452,9 +1444,6 @@ static int __send_empty_flush(struct clone_info *ci)
        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
-       bio_disassociate_blkg(ci->bio);
-
        return 0;
 }
 
@@ -1642,6 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else if (op_is_zone_mgmt(bio_op(bio))) {
                ci.bio = bio;
@@ -1716,6 +1706,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else {
                struct dm_target_io *tio;
@@ -2457,15 +2448,29 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+       int cpu;
+       struct hd_struct *part = &dm_disk(md)->part0;
+       long sum = 0;
+
+       for_each_possible_cpu(cpu) {
+               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+       }
+
+       return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
 {
        int r = 0;
        DEFINE_WAIT(wait);
 
-       while (1) {
+       while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);
 
-               if (!md_in_flight(md))
+               if (!md_in_flight_bios(md))
                        break;
 
                if (signal_pending_state(task_state, current)) {
@@ -2480,6 +2485,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
        return r;
 }
 
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+       int r = 0;
+
+       if (!queue_is_mq(md->queue))
+               return dm_wait_for_bios_completion(md, task_state);
+
+       while (true) {
+               if (!blk_mq_queue_inflight(md->queue))
+                       break;
+
+               if (signal_pending_state(task_state, current)) {
+                       r = -EINTR;
+                       break;
+               }
+
+               msleep(5);
+       }
+
+       return r;
+}
+
 /*
  * Process the deferred bios
  */
@@ -2913,17 +2940,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                       unsigned cookie)
 {
+       int r;
+       unsigned noio_flag;
        char udev_cookie[DM_COOKIE_LENGTH];
        char *envp[] = { udev_cookie, NULL };
 
+       noio_flag = memalloc_noio_save();
+
        if (!cookie)
-               return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+               r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
        else {
                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                         DM_COOKIE_ENV_VAR_NAME, cookie);
-               return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
-                                         action, envp);
+               r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+                                      action, envp);
        }
+
+       memalloc_noio_restore(noio_flag);
+
+       return r;
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
index a4ee6b8..b91e472 100644 (file)
@@ -39,8 +39,6 @@
  *     Troy Laramy <t-laramy@ti.com>
  */
 
-#include <asm/cacheflush.h>
-
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/delay.h>
index 10c214b..1ac9aef 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
 
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
index 68aea22..5216487 100644 (file)
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
                        return 0; /* fw doesn't need any host buffers */
 
                /* spin till we get enough memory */
-               while(host_page_buffer_sz > 0) {
-
-                       if((ioc->HostPageBuffer = pci_alloc_consistent(
-                           ioc->pcidev,
-                           host_page_buffer_sz,
-                           &ioc->HostPageBuffer_dma)) != NULL) {
-
+               while (host_page_buffer_sz > 0) {
+                       ioc->HostPageBuffer =
+                               dma_alloc_coherent(&ioc->pcidev->dev,
+                                               host_page_buffer_sz,
+                                               &ioc->HostPageBuffer_dma,
+                                               GFP_KERNEL);
+                       if (ioc->HostPageBuffer) {
                                dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                                    "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
                                    ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                sz = ioc->alloc_sz;
                dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free  @ %p, sz=%d bytes\n",
                    ioc->name, ioc->alloc, ioc->alloc_sz));
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
                ioc->alloc_total -= sz;
        }
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                        "HostPageBuffer free  @ %p, sz=%d bytes\n",
                        ioc->name, ioc->HostPageBuffer,
                        ioc->HostPageBuffer_sz));
-               pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+               dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
                    ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
                ioc->HostPageBuffer = NULL;
                ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                                ioc->name, sz, sz, num_chain));
 
                total_size += sz;
-               mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+               mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+                               &alloc_dma, GFP_KERNEL);
                if (mem == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
                                ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               ioc->sense_buf_pool =
-                       pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+               ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+                               &ioc->sense_buf_pool_dma, GFP_KERNEL);
                if (ioc->sense_buf_pool == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
                                ioc->name);
@@ -4613,18 +4614,16 @@ out_fail:
 
        if (ioc->alloc != NULL) {
                sz = ioc->alloc_sz;
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc_total -= sz;
        }
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
        }
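
The mptbase.c hunks above are a mechanical conversion from the legacy PCI DMA wrappers to the generic DMA API. A rough sketch of the equivalence; note the legacy wrapper implied GFP_ATOMIC, whereas the converted call sites can pass GFP_KERNEL because they run in sleepable context:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch of the conversion pattern only; buffer size and usage are illustrative. */
static void *alloc_host_buffer(struct pci_dev *pdev, size_t size,
                               dma_addr_t *dma_handle)
{
        /* Legacy: pci_alloc_consistent(pdev, size, dma_handle), implies GFP_ATOMIC. */
        return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

static void free_host_buffer(struct pci_dev *pdev, size_t size, void *buf,
                             dma_addr_t dma_handle)
{
        /* Legacy: pci_free_consistent(pdev, size, buf, dma_handle). */
        dma_free_coherent(&pdev->dev, size, buf, dma_handle);
}
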
 
index f0737c5..1491561 100644 (file)
@@ -118,8 +118,6 @@ int                 mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
 int            mptscsih_resume(struct pci_dev *pdev);
 #endif
 
-#define SNS_LEN(scp)   SCSI_SENSE_BUFFERSIZE
-
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
                /* Copy the sense received into the scsi command block. */
                req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
                sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
-               memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+               memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
 
                /* Log SMART data (asc = 0x5D, non-IM case only) if required.
                 */
index 02998d4..74cee7c 100644 (file)
@@ -142,10 +142,11 @@ static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq)
                goto err;
 
        domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd);
-       if (!domain)
+       if (!domain) {
+               irq_domain_free_fwnode(fn);
                goto err;
+       }
 
-       irq_domain_free_fwnode(fn);
        ipd->domain = domain;
 
        irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
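
The ioc3 hunk above moves irq_domain_free_fwnode() into the failure branch: once irq_domain_create_linear() succeeds, the domain keeps using the fwnode, so freeing it unconditionally released memory still in use. A hedged sketch of the intended ordering; struct example_priv and the fwnode name are placeholders, the domain size (24) comes from the hunk:

#include <linux/errno.h>
#include <linux/irqdomain.h>

struct example_priv {
        struct irq_domain *domain;
};

static int example_irq_domain_setup(struct example_priv *priv,
                                    const struct irq_domain_ops *ops)
{
        struct fwnode_handle *fn;
        struct irq_domain *domain;

        fn = irq_domain_alloc_named_fwnode("example-irqdomain");
        if (!fn)
                return -ENOMEM;

        domain = irq_domain_create_linear(fn, 24, ops, priv);
        if (!domain) {
                irq_domain_free_fwnode(fn);     /* free only when no domain was created */
                return -ENOMEM;
        }

        priv->domain = domain;  /* the live domain keeps using fn; do not free it here */
        return 0;
}
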
index ab4144e..d6cd553 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/atmel-ssc.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -20,7 +20,7 @@
 #include "../../sound/soc/atmel/atmel_ssc_dai.h"
 
 /* Serialize access to ssc_list and user count */
-static DEFINE_SPINLOCK(user_lock);
+static DEFINE_MUTEX(user_lock);
 static LIST_HEAD(ssc_list);
 
 struct ssc_device *ssc_request(unsigned int ssc_num)
@@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
        int ssc_valid = 0;
        struct ssc_device *ssc;
 
-       spin_lock(&user_lock);
+       mutex_lock(&user_lock);
        list_for_each_entry(ssc, &ssc_list, list) {
                if (ssc->pdev->dev.of_node) {
                        if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
@@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
        }
 
        if (!ssc_valid) {
-               spin_unlock(&user_lock);
+               mutex_unlock(&user_lock);
                pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
                return ERR_PTR(-ENODEV);
        }
 
        if (ssc->user) {
-               spin_unlock(&user_lock);
+               mutex_unlock(&user_lock);
                dev_dbg(&ssc->pdev->dev, "module busy\n");
                return ERR_PTR(-EBUSY);
        }
        ssc->user++;
-       spin_unlock(&user_lock);
+       mutex_unlock(&user_lock);
 
        clk_prepare(ssc->clk);
 
@@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc)
 {
        bool disable_clk = true;
 
-       spin_lock(&user_lock);
+       mutex_lock(&user_lock);
        if (ssc->user)
                ssc->user--;
        else {
                disable_clk = false;
                dev_dbg(&ssc->pdev->dev, "device already free\n");
        }
-       spin_unlock(&user_lock);
+       mutex_unlock(&user_lock);
 
        if (disable_clk)
                clk_unprepare(ssc->clk);
@@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       spin_lock(&user_lock);
+       mutex_lock(&user_lock);
        list_add_tail(&ssc->list, &ssc_list);
-       spin_unlock(&user_lock);
+       mutex_unlock(&user_lock);
 
        platform_set_drvdata(pdev, ssc);
 
@@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev)
 
        ssc_sound_dai_remove(ssc);
 
-       spin_lock(&user_lock);
+       mutex_lock(&user_lock);
        list_del(&ssc->list);
-       spin_unlock(&user_lock);
+       mutex_unlock(&user_lock);
 
        return 0;
 }
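
The atmel-ssc hunks above convert user_lock, which protects ssc_list and the per-device user count, from a spinlock to a mutex; all of the protected sections appear to run in process context, so a sleeping lock suffices. A generic before/after sketch of the conversion:

#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_MUTEX(example_lock);      /* was: static DEFINE_SPINLOCK(example_lock); */
static LIST_HEAD(example_list);

/* Process-context critical section; with a mutex it may sleep while held. */
static void example_add(struct list_head *entry)
{
        mutex_lock(&example_lock);      /* was: spin_lock(&example_lock); */
        list_add_tail(entry, &example_list);
        mutex_unlock(&example_lock);    /* was: spin_unlock(&example_lock); */
}
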
index f82974a..b0f62cb 100644 (file)
@@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence)
                container_of(fence, struct hl_cs_compl, base_fence);
        struct hl_device *hdev = hl_cs_cmpl->hdev;
 
+       /* EBUSY means the CS was never submitted and hence we don't have
+        * an attached hw_sob object that we should handle here
+        */
+       if (fence->error == -EBUSY)
+               goto free;
+
        if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
                        (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
 
@@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence)
                kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
        }
 
+free:
        kfree_rcu(hl_cs_cmpl, base_fence.rcu);
 }
 
@@ -328,10 +335,16 @@ static void cs_do_release(struct kref *ref)
 
        hl_ctx_put(cs->ctx);
 
+       /* We need to mark an error for a CS that was not submitted, because in
+        * that case the dma fence release flow is different. Mainly, we don't
+        * need to handle hw_sob for signal/wait
+        */
        if (cs->timedout)
                dma_fence_set_error(cs->fence, -ETIMEDOUT);
        else if (cs->aborted)
                dma_fence_set_error(cs->fence, -EIO);
+       else if (!cs->submitted)
+               dma_fence_set_error(cs->fence, -EBUSY);
 
        dma_fence_signal(cs->fence);
        dma_fence_put(cs->fence);
index 3c8dcdf..fc4372c 100644 (file)
@@ -480,7 +480,7 @@ out:
        return 0;
 }
 
-static ssize_t mmu_write(struct file *file, const char __user *buf,
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
 {
        struct seq_file *s = file->private_data;
@@ -1125,7 +1125,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
        {"command_submission_jobs", command_submission_jobs_show, NULL},
        {"userptr", userptr_show, NULL},
        {"vm", vm_show, NULL},
-       {"mmu", mmu_show, mmu_write},
+       {"mmu", mmu_show, mmu_asid_va_write},
        {"engines", engines_show, NULL}
 };
 
index 61f88e9..834470d 100644 (file)
@@ -96,7 +96,7 @@
 
 #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE  3
 
-#define GAUDI_ARB_WDT_TIMEOUT          0x400000
+#define GAUDI_ARB_WDT_TIMEOUT          0x1000000
 
 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
                "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
@@ -1893,6 +1893,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
        WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
        WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
 
+       WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
+
        /* The following configuration is needed only once per QMAN */
        if (qman_id == 0) {
                /* Configure RAZWI IRQ */
@@ -2725,6 +2727,12 @@ static int gaudi_mmu_init(struct hl_device *hdev)
        WREG32(mmSTLB_HOP_CONFIGURATION,
                        hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
 
+       /*
+        * The H/W expects the first PI after init to be 1. After wraparound
+        * we'll write 0.
+        */
+       gaudi->mmu_cache_inv_pi = 1;
+
        gaudi->hw_cap_initialized |= HW_CAP_MMU;
 
        return 0;
@@ -3790,6 +3798,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
                                                src_in_host);
 }
 
+static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
+                                       struct hl_cs_parser *parser,
+                                       struct packet_load_and_exe *user_pkt)
+{
+       u32 cfg;
+
+       cfg = le32_to_cpu(user_pkt->cfg);
+
+       if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
+               dev_err(hdev->dev,
+                       "User not allowed to use Load and Execute\n");
+               return -EPERM;
+       }
+
+       parser->patched_cb_size += sizeof(struct packet_load_and_exe);
+
+       return 0;
+}
+
 static int gaudi_validate_cb(struct hl_device *hdev,
                        struct hl_cs_parser *parser, bool is_mmu)
 {
@@ -3838,6 +3865,11 @@ static int gaudi_validate_cb(struct hl_device *hdev,
                        rc = -EPERM;
                        break;
 
+               case PACKET_LOAD_AND_EXE:
+                       rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
+                               (struct packet_load_and_exe *) user_pkt);
+                       break;
+
                case PACKET_LIN_DMA:
                        parser->contains_dma_pkt = true;
                        if (is_mmu)
@@ -3855,7 +3887,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
                case PACKET_FENCE:
                case PACKET_NOP:
                case PACKET_ARB_POINT:
-               case PACKET_LOAD_AND_EXE:
                        parser->patched_cb_size += pkt_size;
                        break;
 
@@ -5994,6 +6025,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
        mutex_lock(&hdev->mmu_cache_lock);
 
        /* L0 & L1 invalidation */
+       WREG32(mmSTLB_INV_PS, 3);
+       WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
        WREG32(mmSTLB_INV_PS, 2);
 
        rc = hl_poll_timeout(
index a46530d..41a8d9b 100644 (file)
@@ -229,6 +229,8 @@ struct gaudi_internal_qman_info {
  * @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
  *                  Multi MSI is possible only with IOMMU enabled.
  * @ext_queue_idx: helper index for external queues initialization.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ *                    8-bit value so use u8.
  */
 struct gaudi_device {
        int (*armcp_info_get)(struct hl_device *hdev);
@@ -248,6 +250,7 @@ struct gaudi_device {
        u32                             hw_cap_initialized;
        u8                              multi_msi_mode;
        u8                              ext_queue_idx;
+       u8                              mmu_cache_inv_pi;
 };
 
 void gaudi_init_security(struct hl_device *hdev);
index 9a5800b..0f0cd06 100644 (file)
@@ -197,6 +197,9 @@ struct packet_wait {
        __le32 ctl;
 };
 
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT   0
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK    0x00000001
+
 struct packet_load_and_exe {
        __le32 cfg;
        __le32 ctl;
index 8d468e0..f476dbc 100644 (file)
@@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev)
 
        mei_cl_bus_module_put(cldev);
        module_put(THIS_MODULE);
-       dev->driver = NULL;
-       return ret;
 
+       return ret;
 }
 
 static ssize_t name_show(struct device *dev, struct device_attribute *a,
index 9392934..7becfc7 100644 (file)
@@ -94,6 +94,7 @@
 #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
 
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H      0x43E0  /* Tiger Lake Point H */
 
 #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
 #define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
 #  define PCI_CFG_HFS_1_D0I3_MSK     0x80000000
 #define PCI_CFG_HFS_2         0x48
 #define PCI_CFG_HFS_3         0x60
+#  define PCI_CFG_HFS_3_FW_SKU_MSK   0x00000070
+#  define PCI_CFG_HFS_3_FW_SKU_SPS   0x00000060
 #define PCI_CFG_HFS_4         0x64
 #define PCI_CFG_HFS_5         0x68
 #define PCI_CFG_HFS_6         0x6C
index f620442..7649710 100644 (file)
@@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
 #define MEI_CFG_FW_NM                           \
        .quirk_probe = mei_me_fw_type_nm
 
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
 {
        u32 reg;
        unsigned int devfn;
@@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
        return (reg & 0xf0000) == 0xf0000;
 }
 
-#define MEI_CFG_FW_SPS                           \
+#define MEI_CFG_FW_SPS_4                          \
+       .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_type_sps() - check for sps sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in pci function 0
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+       u32 reg;
+       u32 fw_type;
+       unsigned int devfn;
+
+       devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+       pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+       trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+       fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+       dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+       return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+}
+
+#define MEI_CFG_FW_SPS                          \
        .quirk_probe = mei_me_fw_type_sps
 
 #define MEI_CFG_FW_VER_SUPP                     \
@@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = {
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
-       MEI_CFG_FW_SPS,
+       MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_FW_SPS_4,
 };
 
 /* Cannon Lake and newer devices */
@@ -1465,10 +1501,20 @@ static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_DMA_128,
 };
 
-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
 static const struct mei_cfg mei_me_pch12_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_DMA_128,
+       MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support
+ */
+static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS,
 };
 
@@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
        MEI_CFG_TRC,
 };
 
+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_DMA_128,
+       MEI_CFG_TRC,
+       MEI_CFG_FW_SPS,
+};
+
 /*
  * mei_cfg_list - A list of platform specific configurations.
  * Note: has to be synchronized with enum mei_cfg_idx.
@@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
-       [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+       [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
        [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+       [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
        [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+       [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
        [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+       [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
 };
 
 const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
index b6b94e2..6a89736 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
  */
 
@@ -76,14 +76,20 @@ struct mei_me_hw {
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
  *                         client platforms.
- * @MEI_ME_PCH8_SPS_CFG:   Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
  *                         servers platforms with quirk for
  *                         SPS firmware exclusion.
  * @MEI_ME_PCH12_CFG:      Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG:  Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0
+ *                         servers platforms with quirk for
+ *                         SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG:  Platform Controller Hub Gen12 5.0 and newer
  *                         servers platforms with quirk for
  *                         SPS firmware exclusion.
  * @MEI_ME_PCH15_CFG:      Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG:  Platform Controller Hub Gen15 and newer
+ *                         servers platforms with quirk for
+ *                         SPS firmware exclusion.
  * @MEI_ME_NUM_CFG:        Upper Sentinel.
  */
 enum mei_cfg_idx {
@@ -94,10 +100,13 @@ enum mei_cfg_idx {
        MEI_ME_PCH7_CFG,
        MEI_ME_PCH_CPT_PBG_CFG,
        MEI_ME_PCH8_CFG,
-       MEI_ME_PCH8_SPS_CFG,
+       MEI_ME_PCH8_SPS_4_CFG,
        MEI_ME_PCH12_CFG,
+       MEI_ME_PCH12_SPS_4_CFG,
        MEI_ME_PCH12_SPS_CFG,
+       MEI_ME_PCH12_SPS_NODMA_CFG,
        MEI_ME_PCH15_CFG,
+       MEI_ME_PCH15_SPS_CFG,
        MEI_ME_NUM_CFG,
 };
 
index 71f795b..2a3f2fd 100644 (file)
@@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
 
index 7eb38d7..08a3b1c 100644 (file)
@@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
 
        mmc->caps |= MMC_CAP_CMD23;
        if (host->dram_access_quirk) {
+               /* Limit segments to 1 due to low available sram memory */
+               mmc->max_segs = 1;
                /* Limit to the available sram memory */
-               mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
-               mmc->max_blk_count = mmc->max_segs;
+               mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
+                                    mmc->max_blk_size;
        } else {
                mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
                mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
index 5e20c09..df43f42 100644 (file)
@@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
 static struct platform_driver owl_mmc_driver = {
        .driver = {
                .name   = "owl_mmc",
-               .of_match_table = of_match_ptr(owl_mmc_of_match),
+               .of_match_table = owl_mmc_of_match,
        },
        .probe          = owl_mmc_probe,
        .remove         = owl_mmc_remove,
index b277dd7..c0d58e9 100644 (file)
@@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
 
-       config = msm_host->dll_config;
-       writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+       if (msm_host->dll_config)
+               writel_relaxed(msm_host->dll_config,
+                               host->ioaddr + msm_offset->core_dll_config);
 
        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
index 76d832a..7d93056 100644 (file)
@@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                return -EROFS;
        if (!len)
                return 0;
-       if (!mtd->oops_panic_write)
-               mtd->oops_panic_write = true;
+       if (!master->oops_panic_write)
+               master->oops_panic_write = true;
 
        return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
                                    retlen, buf);
index 0a5cb77..f5a53aa 100644 (file)
@@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: operation is unknown, try to find it\n");
 
-               if (!ns_find_operation(ns, 0))
+               if (ns_find_operation(ns, 0))
                        return;
 
                if ((ns->state & ACTION_MASK) &&
index 94bfba9..2925547 100644 (file)
@@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
        struct nand_chip *chip = &data->chip;
        int ret;
 
-       ret = mtd_device_unregister(mtd);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
        WARN_ON(ret);
        nand_cleanup(chip);
 
index 5d3c691..3dd46cd 100644 (file)
@@ -572,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
        if (data[IFLA_BAREUDP_SRCPORT_MIN])
                conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
 
+       if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
+               conf->multi_proto_mode = true;
+
        return 0;
 }
 
index c7ac63f..946e41f 100644 (file)
@@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        set_bit(0, priv->cfp.used);
        set_bit(0, priv->cfp.unique);
 
+       /* Balance of_node_put() done by of_find_node_by_name() */
+       of_node_get(dn);
        ports = of_find_node_by_name(dn, "ports");
        if (ports) {
                bcm_sf2_identify_ports(priv, ports);
index 47d65b7..7c17b0f 100644 (file)
@@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 9a51b8a..8d15c30 100644 (file)
@@ -1588,6 +1588,9 @@ static int ksz9477_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 7d050fa..7951f52 100644 (file)
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
 static const struct of_device_id ksz9477_dt_ids[] = {
        { .compatible = "microchip,ksz9477" },
        { .compatible = "microchip,ksz9897" },
+       { .compatible = "microchip,ksz9893" },
        { .compatible = "microchip,ksz9567" },
        {},
 };
index bdfd6c4..af35651 100644 (file)
@@ -7,6 +7,165 @@
 
 #define SJA1105_SIZE_VL_STATUS                 8
 
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+                                    struct sja1105_rule *rule,
+                                    u8 gate_state, s64 entry_time,
+                                    struct netlink_ext_ack *extack)
+{
+       struct sja1105_gate_entry *e;
+       int rc;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (!e)
+               return -ENOMEM;
+
+       e->rule = rule;
+       e->gate_state = gate_state;
+       e->interval = entry_time;
+
+       if (list_empty(&gating_cfg->entries)) {
+               list_add(&e->list, &gating_cfg->entries);
+       } else {
+               struct sja1105_gate_entry *p;
+
+               list_for_each_entry(p, &gating_cfg->entries, list) {
+                       if (p->interval == e->interval) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Gate conflict");
+                               rc = -EBUSY;
+                               goto err;
+                       }
+
+                       if (e->interval < p->interval)
+                               break;
+               }
+               list_add(&e->list, p->list.prev);
+       }
+
+       gating_cfg->num_entries++;
+
+       return 0;
+err:
+       kfree(e);
+       return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+                                   u64 cycle_time)
+{
+       struct sja1105_gate_entry *last_e;
+       struct sja1105_gate_entry *e;
+       struct list_head *prev;
+
+       list_for_each_entry(e, &gating_cfg->entries, list) {
+               struct sja1105_gate_entry *p;
+
+               prev = e->list.prev;
+
+               if (prev == &gating_cfg->entries)
+                       continue;
+
+               p = list_entry(prev, struct sja1105_gate_entry, list);
+               p->interval = e->interval - p->interval;
+       }
+       last_e = list_last_entry(&gating_cfg->entries,
+                                struct sja1105_gate_entry, list);
+       last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+       struct sja1105_gate_entry *e, *n;
+
+       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+               list_del(&e->list);
+               kfree(e);
+       }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+                                             struct netlink_ext_ack *extack)
+{
+       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+       struct sja1105_rule *rule;
+       s64 max_cycle_time = 0;
+       s64 its_base_time = 0;
+       int i, rc = 0;
+
+       sja1105_free_gating_config(gating_cfg);
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               if (max_cycle_time < rule->vl.cycle_time) {
+                       max_cycle_time = rule->vl.cycle_time;
+                       its_base_time = rule->vl.base_time;
+               }
+       }
+
+       if (!max_cycle_time)
+               return 0;
+
+       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+               max_cycle_time, its_base_time);
+
+       gating_cfg->base_time = its_base_time;
+       gating_cfg->cycle_time = max_cycle_time;
+       gating_cfg->num_entries = 0;
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               s64 time;
+               s64 rbt;
+
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               /* Calculate the difference between this gating schedule's
+                * base time, and the base time of the gating schedule with the
+                * longest cycle time. We call it the relative base time (rbt).
+                */
+               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+                                      its_base_time);
+               rbt -= its_base_time;
+
+               time = rbt;
+
+               for (i = 0; i < rule->vl.num_entries; i++) {
+                       u8 gate_state = rule->vl.entries[i].gate_state;
+                       s64 entry_time = time;
+
+                       while (entry_time < max_cycle_time) {
+                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
+                                                              gate_state,
+                                                              entry_time,
+                                                              extack);
+                               if (rc)
+                                       goto err;
+
+                               entry_time += rule->vl.cycle_time;
+                       }
+                       time += rule->vl.entries[i].interval;
+               }
+       }
+
+       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+       return 0;
+err:
+       sja1105_free_gating_config(gating_cfg);
+       return rc;
+}
+
 /* The switch flow classification core implements TTEthernet, which 'thinks' in
  * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
  * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
@@ -342,7 +501,9 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
@@ -388,171 +549,19 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port,
                kfree(rule);
        }
 
-       rc = sja1105_init_virtual_links(priv, extack);
+       rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                return rc;
 
-       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
-}
-
-/* Insert into the global gate list, sorted by gate action time. */
-static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
-                                    struct sja1105_rule *rule,
-                                    u8 gate_state, s64 entry_time,
-                                    struct netlink_ext_ack *extack)
-{
-       struct sja1105_gate_entry *e;
-       int rc;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (!e)
-               return -ENOMEM;
-
-       e->rule = rule;
-       e->gate_state = gate_state;
-       e->interval = entry_time;
-
-       if (list_empty(&gating_cfg->entries)) {
-               list_add(&e->list, &gating_cfg->entries);
-       } else {
-               struct sja1105_gate_entry *p;
-
-               list_for_each_entry(p, &gating_cfg->entries, list) {
-                       if (p->interval == e->interval) {
-                               NL_SET_ERR_MSG_MOD(extack,
-                                                  "Gate conflict");
-                               rc = -EBUSY;
-                               goto err;
-                       }
-
-                       if (e->interval < p->interval)
-                               break;
-               }
-               list_add(&e->list, p->list.prev);
-       }
-
-       gating_cfg->num_entries++;
-
-       return 0;
-err:
-       kfree(e);
-       return rc;
-}
-
-/* The gate entries contain absolute times in their e->interval field. Convert
- * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
- */
-static void
-sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
-                                   u64 cycle_time)
-{
-       struct sja1105_gate_entry *last_e;
-       struct sja1105_gate_entry *e;
-       struct list_head *prev;
-
-       list_for_each_entry(e, &gating_cfg->entries, list) {
-               struct sja1105_gate_entry *p;
-
-               prev = e->list.prev;
-
-               if (prev == &gating_cfg->entries)
-                       continue;
-
-               p = list_entry(prev, struct sja1105_gate_entry, list);
-               p->interval = e->interval - p->interval;
-       }
-       last_e = list_last_entry(&gating_cfg->entries,
-                                struct sja1105_gate_entry, list);
-       if (last_e->list.prev != &gating_cfg->entries)
-               last_e->interval = cycle_time - last_e->interval;
-}
-
-static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
-{
-       struct sja1105_gate_entry *e, *n;
-
-       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
-               list_del(&e->list);
-               kfree(e);
-       }
-}
-
-static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
-                                             struct netlink_ext_ack *extack)
-{
-       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
-       struct sja1105_rule *rule;
-       s64 max_cycle_time = 0;
-       s64 its_base_time = 0;
-       int i, rc = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               if (max_cycle_time < rule->vl.cycle_time) {
-                       max_cycle_time = rule->vl.cycle_time;
-                       its_base_time = rule->vl.base_time;
-               }
-       }
-
-       if (!max_cycle_time)
-               return 0;
-
-       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
-               max_cycle_time, its_base_time);
-
-       sja1105_free_gating_config(gating_cfg);
-
-       gating_cfg->base_time = its_base_time;
-       gating_cfg->cycle_time = max_cycle_time;
-       gating_cfg->num_entries = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               s64 time;
-               s64 rbt;
-
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               /* Calculate the difference between this gating schedule's
-                * base time, and the base time of the gating schedule with the
-                * longest cycle time. We call it the relative base time (rbt).
-                */
-               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
-                                      its_base_time);
-               rbt -= its_base_time;
-
-               time = rbt;
-
-               for (i = 0; i < rule->vl.num_entries; i++) {
-                       u8 gate_state = rule->vl.entries[i].gate_state;
-                       s64 entry_time = time;
-
-                       while (entry_time < max_cycle_time) {
-                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
-                                                              gate_state,
-                                                              entry_time,
-                                                              extack);
-                               if (rc)
-                                       goto err;
-
-                               entry_time += rule->vl.cycle_time;
-                       }
-                       time += rule->vl.entries[i].interval;
-               }
-       }
+       rc = sja1105_init_virtual_links(priv, extack);
+       if (rc)
+               return rc;
 
-       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+       rc = sja1105_init_scheduling(priv);
+       if (rc < 0)
+               return rc;
 
-       return 0;
-err:
-       sja1105_free_gating_config(gating_cfg);
-       return rc;
+       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
 }
 
 int sja1105_vl_gate(struct sja1105_private *priv, int port,
@@ -588,14 +597,12 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
 
        if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
            key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
-               dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
-               dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
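
The sja1105_gating_cfg_time_to_interval() helper moved above turns a sorted list of absolute gate activation times into per-entry intervals within one cycle. The same transformation on a plain array, as an illustrative sketch that assumes at least one entry, sorted times, and all times smaller than cycle_time:

#include <linux/types.h>

/* Example: {0, 5, 10, 15} with cycle_time 20 becomes {5, 5, 5, 5}. */
static void times_to_intervals(s64 *t, int n, s64 cycle_time)
{
        int i;

        for (i = 0; i < n - 1; i++)
                t[i] = t[i + 1] - t[i];         /* gap to the next activation time */

        t[n - 1] = cycle_time - t[n - 1];       /* last entry runs to the end of the cycle */
}
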
index 3c8e804..d775b23 100644 (file)
@@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_SRCA_ADR(location + i),
-                               ipv6_src[i]);
+                               ipv6_src[3 - i]);
 }
 
 void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
@@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_DSTA_ADR(location + i),
-                               ipv6_dest[i]);
+                               ipv6_dest[3 - i]);
 }
 
 u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
index 0622079..7430ff0 100644 (file)
  */
 
  /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
 /* Bitmask for bitfield l3_da0[1F:0] */
 #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
 /* Inverted bitmask for bitfield l3_da0[1F:0] */
index b93e05f..6a884df 100644 (file)
@@ -6292,6 +6292,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
 
 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 {
+       struct hwrm_stat_ctx_clr_stats_input req0 = {0};
        struct hwrm_stat_ctx_free_input req = {0};
        int i;
 
@@ -6301,6 +6302,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return;
 
+       bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
 
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -6310,7 +6312,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 
                if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
                        req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+                       if (BNXT_FW_MAJ(bp) <= 20) {
+                               req0.stat_ctx_id = req.stat_ctx_id;
+                               _hwrm_send_message(bp, &req0, sizeof(req0),
+                                                  HWRM_CMD_TIMEOUT);
+                       }
                        _hwrm_send_message(bp, &req, sizeof(req),
                                           HWRM_CMD_TIMEOUT);
 
@@ -6976,7 +6982,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
 
        bp->tx_push_thresh = 0;
-       if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+       if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+           BNXT_FW_MAJ(bp) > 217)
                bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
 
        hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7240,8 +7247,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
 static int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 fw_maj, fw_min, fw_bld, fw_rsv;
        u32 dev_caps_cfg, hwrm_ver;
-       int rc;
+       int rc, len;
 
        bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -7273,9 +7281,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
                         resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                         resp->hwrm_intf_upd_8b);
 
-       snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
-                resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
-                resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+       fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+       if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+               fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+               fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+               fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+               len = FW_VER_STR_LEN;
+       } else {
+               fw_maj = resp->hwrm_fw_maj_8b;
+               fw_min = resp->hwrm_fw_min_8b;
+               fw_bld = resp->hwrm_fw_bld_8b;
+               fw_rsv = resp->hwrm_fw_rsvd_8b;
+               len = BC_HWRM_STR_LEN;
+       }
+       bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+       snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+                fw_rsv);
 
        if (strlen(resp->active_pkg_name)) {
                int fw_ver_len = strlen(bp->fw_ver_str);
@@ -11892,7 +11913,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->ethtool_ops = &bnxt_ethtool_ops;
        pci_set_drvdata(pdev, dev);
 
-       bnxt_vpd_read_info(bp);
+       if (BNXT_PF(bp))
+               bnxt_vpd_read_info(bp);
 
        rc = bnxt_alloc_hwrm_resources(bp);
        if (rc)
index 9e173d7..78e2fd6 100644 (file)
@@ -1746,6 +1746,11 @@ struct bnxt {
 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
        char                    fw_ver_str[FW_VER_STR_LEN];
        char                    hwrm_ver_supp[FW_VER_STR_LEN];
+       u64                     fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv)                   \
+       ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp)                ((bp)->fw_ver_code >> 48)
+
        __be16                  vxlan_port;
        u8                      vxlan_port_cnt;
        __le16                  vxlan_fw_dst_port_id;
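
The BNXT_FW_VER_CODE()/BNXT_FW_MAJ() macros added above pack the four 16-bit firmware version fields into a single u64 so the driver can compare whole versions numerically, as in the "BNXT_FW_MAJ(bp) <= 20" check in the stat-context hunk. A small standalone illustration with made-up version numbers; unlike the driver macro, the extraction macro here takes the packed code directly:

#include <linux/types.h>

#define FW_VER_CODE(maj, min, bld, rsv)                                 \
        ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define FW_MAJ(code)    ((u64)(code) >> 48)

/* Illustrative values only: firmware "20.6.143.0" packs into one u64 whose
 * major component is 20, so a check like "FW_MAJ(code) <= 20" matches it.
 */
static const u64 example_code = FW_VER_CODE(20, 6, 143, 0);
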
index 3a9a51f..392e32c 100644 (file)
@@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
                }
        }
 
+       bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
 }
@@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
 
        bnxt_free_vf_resources(bp);
 
-       bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
index 0eef4f5..4a11c1e 100644 (file)
@@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
 }
 
 static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
-                                   struct flow_block_offload *f)
+                                   struct flow_block_offload *f, void *data,
+                                   void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
        struct flow_block_cb *block_cb;
@@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                cb_priv->bp = bp;
                list_add(&cb_priv->list, &bp->tc_indr_block_list);
 
-               block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              bnxt_tc_setup_indr_rel);
+               block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   bnxt_tc_setup_indr_rel, f,
+                                                   netdev, data, bp, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                break;
        default:
@@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
 }
 
 static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
-                                enum tc_setup_type type, void *type_data)
+                                enum tc_setup_type type, void *type_data,
+                                void *data,
+                                void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!bnxt_is_netdev_indr_offload(netdev))
                return -EOPNOTSUPP;
 
        switch (type) {
        case TC_SETUP_BLOCK:
-               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
+                                               cleanup);
        default:
                break;
        }
@@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
                return;
 
        flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
-                                bnxt_tc_setup_indr_block_cb);
+                                bnxt_tc_setup_indr_rel);
        rhashtable_destroy(&tc_info->flow_table);
        rhashtable_destroy(&tc_info->l2_table);
        rhashtable_destroy(&tc_info->decap_l2_table);
index ff31da0..af924a8 100644 (file)
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                        genet_dma_ring_regs[r]);
 }
 
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
-                                          u32 f_index)
-{
-       u32 offset;
-       u32 reg;
-
-       offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
-       reg = bcmgenet_hfb_reg_readl(priv, offset);
-       return !!(reg & (1 << (f_index % 32)));
-}
-
 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
 {
        u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
        bcmgenet_hfb_reg_writel(priv, reg, offset);
 }
 
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
-       u32 f_index;
-
-       /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
-       for (f_index = MAX_NUM_OF_FS_RULES;
-            f_index < priv->hw_params->hfb_filter_cnt; f_index++)
-               if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
-                       return f_index;
-
-       return -ENOMEM;
-}
-
 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
 {
        while (size) {
@@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 {
        struct ethtool_rx_flow_spec *fs = &rule->fs;
        int err = 0, offset = 0, f_length = 0;
-       u16 val_16, mask_16;
        u8 val_8, mask_8;
+       __be16 val_16;
+       u16 mask_16;
        size_t size;
        u32 *f_data;
 
@@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
        return err;
 }
 
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit  19    - nibble 0 match enable
- * bit  18    - nibble 1 match enable
- * bit  17    - nibble 2 match enable
- * bit  16    - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8  - nibble 1 data
- * bits 7:4   - nibble 2 data
- * bits 3:0   - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
-                           u32 f_length, u32 rx_queue)
-{
-       int f_index;
-
-       f_index = bcmgenet_hfb_find_unused_filter(priv);
-       if (f_index < 0)
-               return -ENOMEM;
-
-       if (f_length > priv->hw_params->hfb_filter_size)
-               return -EINVAL;
-
-       bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
-       bcmgenet_hfb_enable_filter(priv, f_index);
-
-       return 0;
-}
-
 /* bcmgenet_hfb_clear
  *
  * Clear Hardware Filter Block and disable all filtering.
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       if (skb_padto(skb, ETH_ZLEN)) {
-               ret = NETDEV_TX_OK;
-               goto out;
-       }
-
        /* Retain how many bytes will be sent on the wire, without TSB inserted
         * by transmit checksum offload
         */
@@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                len_stat = (size << DMA_BUFLENGTH_SHIFT) |
                           (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
 
+               /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+                * will need to restore software padding of "runt" packets
+                */
                if (!i) {
                        len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
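The new comment above records why the skb_padto() call could be removed: with DMA_TX_APPEND_CRC set on the first descriptor, the hardware pads runt frames itself. A minimal sketch of the software fallback the comment says would have to return if that flag were ever dropped (this is the removed hunk, annotated):

        /* Pad frames shorter than ETH_ZLEN in software.  skb_padto() frees
         * the skb on allocation failure, so the only safe response is to
         * report NETDEV_TX_OK and stop processing it.
         */
        if (skb_padto(skb, ETH_ZLEN)) {
                ret = NETDEV_TX_OK;
                goto out;
        }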
index 7a3b22b..ebff1fc 100644 (file)
@@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We probably don't have netdev yet */
-       if (!netdev || !netif_running(netdev))
+       /* Could be second call or maybe we don't have netdev yet */
+       if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;
 
        /* We needn't recover from permanent error */
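A hedged sketch of how the extra tp->pcierr_recovery test above behaves inside a PCI .error_detected() handler; the teardown and the point where the flag is set are assumed from the rest of the driver, not shown in this hunk.

        rtnl_lock();
        /* Bail out if this is a repeat notification during an ongoing
         * recovery, or if probe never got as far as creating the netdev.
         */
        if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;
        /* First notification: mark recovery in progress before tearing the
         * device down (assumed flow, not part of the hunk above).
         */
        tp->pcierr_recovery = true;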
index 6793307..f1f0976 100644 (file)
@@ -2558,7 +2558,7 @@ static int macb_open(struct net_device *dev)
 
        err = macb_phylink_connect(bp);
        if (err)
-               goto napi_exit;
+               goto reset_hw;
 
        netif_tx_start_all_queues(dev);
 
@@ -2567,9 +2567,11 @@ static int macb_open(struct net_device *dev)
 
        return 0;
 
-napi_exit:
+reset_hw:
+       macb_reset_hw(bp);
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
                napi_disable(&queue->napi);
+       macb_free_consistent(bp);
 pm_exit:
        pm_runtime_put_sync(&bp->pdev->dev);
        return err;
@@ -2819,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct macb *bp = netdev_priv(netdev);
 
-       wol->supported = 0;
-       wol->wolopts = 0;
-
-       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
                phylink_ethtool_get_wol(bp->phylink, wol);
+               wol->supported |= WAKE_MAGIC;
+
+               if (bp->wol & MACB_WOL_ENABLED)
+                       wol->wolopts |= WAKE_MAGIC;
+       }
 }
 
 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2831,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct macb *bp = netdev_priv(netdev);
        int ret;
 
+       /* Pass the request to the phylink layer first */
        ret = phylink_ethtool_set_wol(bp->phylink, wol);
-       if (!ret)
-               return 0;
+       /* Don't manage WoL on MAC if handled by the PHY
+        * or if there's a failure in talking to the PHY
+        */
+       if (!ret || ret != -EOPNOTSUPP)
+               return ret;
 
        if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
            (wol->wolopts & ~WAKE_MAGIC))
@@ -3760,15 +3768,9 @@ static int macb_init(struct platform_device *pdev)
 
 static struct sifive_fu540_macb_mgmt *mgmt;
 
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
 {
-       struct macb *lp = netdev_priv(dev);
        struct macb_queue *q = &lp->queues[0];
-       struct macb_dma_desc *desc;
-       dma_addr_t addr;
-       u32 ctl;
-       int i;
 
        q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
@@ -3790,6 +3792,43 @@ static int at91ether_start(struct net_device *dev)
                return -ENOMEM;
        }
 
+       return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+
+       if (q->rx_ring) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 macb_dma_desc_get_size(lp),
+                                 q->rx_ring, q->rx_ring_dma);
+               q->rx_ring = NULL;
+       }
+
+       if (q->rx_buffers) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 AT91ETHER_MAX_RBUFF_SZ,
+                                 q->rx_buffers, q->rx_buffers_dma);
+               q->rx_buffers = NULL;
+       }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+       struct macb_dma_desc *desc;
+       dma_addr_t addr;
+       u32 ctl;
+       int i, ret;
+
+       ret = at91ether_alloc_coherent(lp);
+       if (ret)
+               return ret;
+
        addr = q->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
                desc = macb_rx_desc(q, i);
@@ -3811,9 +3850,39 @@ static int at91ether_start(struct net_device *dev)
        ctl = macb_readl(lp, NCR);
        macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
+       /* Enable MAC interrupts */
+       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
        return 0;
 }
 
+static void at91ether_stop(struct macb *lp)
+{
+       u32 ctl;
+
+       /* Disable MAC interrupts */
+       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       /* Disable Receiver and Transmitter */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+       /* Free resources. */
+       at91ether_free_coherent(lp);
+}
+
 /* Open the ethernet interface */
 static int at91ether_open(struct net_device *dev)
 {
@@ -3833,63 +3902,36 @@ static int at91ether_open(struct net_device *dev)
 
        macb_set_hwaddr(lp);
 
-       ret = at91ether_start(dev);
+       ret = at91ether_start(lp);
        if (ret)
-               return ret;
-
-       /* Enable MAC interrupts */
-       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
+               goto pm_exit;
 
        ret = macb_phylink_connect(lp);
        if (ret)
-               return ret;
+               goto stop;
 
        netif_start_queue(dev);
 
        return 0;
+
+stop:
+       at91ether_stop(lp);
+pm_exit:
+       pm_runtime_put_sync(&lp->pdev->dev);
+       return ret;
 }
 
 /* Close the interface */
 static int at91ether_close(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
-       struct macb_queue *q = &lp->queues[0];
-       u32 ctl;
-
-       /* Disable Receiver and Transmitter */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-       /* Disable MAC interrupts */
-       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
 
        netif_stop_queue(dev);
 
        phylink_stop(lp->phylink);
        phylink_disconnect_phy(lp->phylink);
 
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR *
-                         macb_dma_desc_get_size(lp),
-                         q->rx_ring, q->rx_ring_dma);
-       q->rx_ring = NULL;
-
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-                         q->rx_buffers, q->rx_buffers_dma);
-       q->rx_buffers = NULL;
+       at91ether_stop(lp);
 
        return pm_runtime_put(&lp->pdev->dev);
 }
@@ -4386,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev)
        bp->wol = 0;
        if (of_get_property(np, "magic-packet", NULL))
                bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
-       device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+       device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
        spin_lock_init(&bp->lock);
 
@@ -4562,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev)
                        bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
        }
 
-       netif_carrier_off(netdev);
        if (bp->ptp_info)
                bp->ptp_info->ptp_remove(netdev);
-       pm_runtime_force_suspend(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_suspend(dev);
 
        return 0;
 }
@@ -4580,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev)
        if (!netif_running(netdev))
                return 0;
 
-       pm_runtime_force_resume(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_resume(dev);
 
        if (bp->wol & MACB_WOL_ENABLED) {
                macb_writel(bp, IDR, MACB_BIT(WOL));
@@ -4618,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
@@ -4634,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_prepare_enable(bp->pclk);
                clk_prepare_enable(bp->hclk);
                clk_prepare_enable(bp->tx_clk);
index 7b9cd69..d8ab8e3 100644 (file)
@@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
        u8 mem_type[CTXT_INGRESS + 1] = { 0 };
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ch_cntxt *buff;
-       u64 *dst_off, *src_off;
        u8 *ctx_buf;
        u8 i, k;
        int rc;
@@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
                }
 
                for (j = 0; j < max_ctx_qid; j++) {
+                       __be64 *dst_off;
+                       u64 *src_off;
+
                        src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
-                       dst_off = (u64 *)buff->data;
+                       dst_off = (__be64 *)buff->data;
 
                        /* The data is stored in 64-bit cpu order.  Convert it
                         * to big endian before parsing.
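The retyping above only fixes the annotations; the conversion the comment describes would look roughly like this (loop bound assumed from SGE_CTXT_SIZE, not shown in the hunk):

                        for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
                                dst_off[k] = cpu_to_be64(src_off[k]);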
index d3c654b..80c6627 100644 (file)
@@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val)
               ((val & 0x02) << 5) |
               ((val & 0x01) << 7);
 }
+
+extern const char * const dcb_ver_array[];
+
 #define CXGB4_DCB_ENABLED true
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index 8284992..b477b88 100644 (file)
@@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
 };
 
 #ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
 
 /* Data Center Bridging information for each port.
  */
index 9fd4967..f27be11 100644 (file)
@@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 /**
  *     lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
  *     capabilities
- *     @et_lmm: ethtool Link Mode Mask
+ *     @link_mode_mask: ethtool Link Mode Mask
  *
  *     Translate ethtool Link Mode Mask into a Firmware Port capabilities
  *     value.
index 7965552..d02d346 100644 (file)
@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                           unsigned int tid, bool dip, bool sip, bool dp,
                           bool sp)
 {
+       u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+       u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
        if (dip) {
                if (f->fs.type) {
                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
        }
 
        set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
-                     (dp ? f->fs.nat_lport : 0) |
-                     (sp ? f->fs.nat_fport << 16 : 0), 1);
+                     (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+                     1);
 }
 
 /* Validate filter spec against configuration done on the card. */
@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
        fwr->fpm = htons(f->fs.mask.fport);
 
        if (adapter->params.filter2_wr_support) {
+               u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+               u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
                fwr->natmode_to_ulp_type =
                        FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
                        FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
-               fwr->newlport = htons(f->fs.nat_lport);
-               fwr->newfport = htons(f->fs.nat_fport);
+               fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+               fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
        }
 
        /* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
                struct in_addr *addr;
 
                addr = (struct in_addr *)ipmask;
-               if (addr->s_addr == 0xffffffff)
+               if (addr->s_addr == htonl(0xffffffff))
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;
 
                addr6 = (struct in6_addr *)ipmask;
-               if (addr6->s6_addr32[0] == 0xffffffff &&
-                   addr6->s6_addr32[1] == 0xffffffff &&
-                   addr6->s6_addr32[2] == 0xffffffff &&
-                   addr6->s6_addr32[3] == 0xffffffff)
+               if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[1] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[2] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[3] == htonl(0xffffffff))
                        return true;
        }
        return false;
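The htonl() additions above do not change behaviour, since an all-ones word reads the same in either byte order; they make the comparison type-correct (__be32 against __be32) so static checkers such as sparse stay quiet. Illustrated with a hypothetical helper:

/* addr->s_addr is __be32, so compare it against a network-order constant */
static bool example_is_full_ipv4_mask(const struct in_addr *addr)
{
        return addr->s_addr == htonl(0xffffffff);
}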
index 854b171..0329a6b 100644 (file)
@@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
  *                or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: the destination to store the new SMT index.
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t)
  *     @stid: the server TID
  *     @sip: local IP address to bind server to
  *     @sport: the server's TCP port
+ *     @vlan: the VLAN header information
  *     @queue: queue to direct messages from this server to
  *
  *     Create an IP server for the given port and address.
@@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 
        /* Clear out filter specifications */
        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
-       f->fs.val.lport = cpu_to_be16(sport);
+       f->fs.val.lport = be16_to_cpu(sport);
        f->fs.mask.lport  = ~0;
        val = (u8 *)&sip;
        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
-       u32 i, n10g = 0, qidx = 0, n1g = 0;
        u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
+       u32 i, n10g = 0, qidx = 0;
        u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
@@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap)
        if (n10g)
                q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
 
-       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
@@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap)
        else
                q10g = max(8U, q10g);
 
-       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+       while ((q10g * n10g) >
+              (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
                q10g--;
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index f5bc996..70dbee8 100644 (file)
@@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
 }
 
 /**
+ * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
  * @ptp: ptp clock structure
  * @ppb: Desired frequency change in parts per billion
  *
@@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
 /**
  * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
  * @delta: Desired change in nanoseconds
  *
  * Adjust the timer by resetting the timecounter structure.
index 4a5fa9e..59b65d4 100644 (file)
@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
-       PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
-       PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
 };
 
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
@@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                struct flow_match_ports match;
 
                flow_rule_match_ports(rule, &match);
-               fs->val.lport = cpu_to_be16(match.key->dst);
-               fs->mask.lport = cpu_to_be16(match.mask->dst);
-               fs->val.fport = cpu_to_be16(match.key->src);
-               fs->mask.fport = cpu_to_be16(match.mask->src);
+               fs->val.lport = be16_to_cpu(match.key->dst);
+               fs->mask.lport = be16_to_cpu(match.mask->dst);
+               fs->val.fport = be16_to_cpu(match.key->src);
+               fs->mask.fport = be16_to_cpu(match.mask->src);
 
                /* also initialize nat_lport/fport to same values */
-               fs->nat_lport = cpu_to_be16(match.key->dst);
-               fs->nat_fport = cpu_to_be16(match.key->src);
+               fs->nat_lport = fs->val.lport;
+               fs->nat_fport = fs->val.fport;
        }
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
@@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             TCP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), TCP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
@@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             UDP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), UDP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
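The port handling above fixes the direction of the conversions: the flow dissector hands over __be16 (network-order) keys, while ch_filter_specification keeps ports in host order, so be16_to_cpu() is the correct call when filling val/mask, and nat_lport/nat_fport simply copy the already-converted values. A small sketch with a hypothetical helper name:

static void example_copy_ports(struct ch_filter_specification *fs,
                               const struct flow_match_ports *match)
{
        /* network order in, CPU order stored */
        fs->val.lport  = be16_to_cpu(match->key->dst);
        fs->mask.lport = be16_to_cpu(match->mask->dst);
        fs->val.fport  = be16_to_cpu(match->key->src);
        fs->mask.fport = be16_to_cpu(match->mask->src);
        fs->nat_lport  = fs->val.lport;
        fs->nat_fport  = fs->val.fport;
}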
index 3f3c11e..dede025 100644 (file)
@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
                             bool next_header)
 {
        unsigned int i, j;
-       u32 val, mask;
+       __be32 val, mask;
        int off, err;
        bool found;
 
@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                const struct cxgb4_next_header *next;
                bool found = false;
                unsigned int i, j;
-               u32 val, mask;
+               __be32 val, mask;
                int off;
 
                if (t->table[link_uhtid - 1].link_handle) {
@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 
                /* Try to find matches that allow jumps to next header. */
                for (i = 0; next[i].jump; i++) {
-                       if (next[i].offoff != cls->knode.sel->offoff ||
-                           next[i].shift != cls->knode.sel->offshift ||
-                           next[i].mask != cls->knode.sel->offmask ||
-                           next[i].offset != cls->knode.sel->off)
+                       if (next[i].sel.offoff != cls->knode.sel->offoff ||
+                           next[i].sel.offshift != cls->knode.sel->offshift ||
+                           next[i].sel.offmask != cls->knode.sel->offmask ||
+                           next[i].sel.off != cls->knode.sel->off)
                                continue;
 
                        /* Found a possible candidate.  Find a key that
@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                                val = cls->knode.sel->keys[j].val;
                                mask = cls->knode.sel->keys[j].mask;
 
-                               if (next[i].match_off == off &&
-                                   next[i].match_val == val &&
-                                   next[i].match_mask == mask) {
+                               if (next[i].key.off == off &&
+                                   next[i].key.val == val &&
+                                   next[i].key.mask == mask) {
                                        found = true;
                                        break;
                                }
index 125868c..f59dd4b 100644 (file)
 struct cxgb4_match_field {
        int off; /* Offset from the beginning of the header to match */
        /* Fill the value/mask pair in the spec if matched */
-       int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+       int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
 };
 
 /* IPv4 match fields */
 static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
-                                      u32 val, u32 mask)
+                                      __be32 val, __be32 mask)
 {
        u32 mask_val;
        u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
 
 /* IPv6 match fields */
 static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 20) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 8) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
 
 /* TCP/UDP match */
 static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.fport  = ntohl(val)  >> 16;
        f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
 };
 
 struct cxgb4_next_header {
-       unsigned int offset; /* Offset to next header */
-       /* offset, shift, and mask added to offset above
+       /* Offset, shift, and mask added to beginning of the header
         * to get to next header.  Useful when using a header
         * field's value to jump to next header such as IHL field
         * in IPv4 header.
         */
-       unsigned int offoff;
-       u32 shift;
-       u32 mask;
-       /* match criteria to make this jump */
-       unsigned int match_off;
-       u32 match_val;
-       u32 match_mask;
+       struct tc_u32_sel sel;
+       struct tc_u32_key key;
        /* location of jump to make */
        const struct cxgb4_match_field *jump;
 };
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
  * IPv4 header.
  */
 static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00060000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00110000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
  * to get to transport layer header.
  */
 static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00000600),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00001100),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 struct cxgb4_link {
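The IPv4 jump entries above encode the classic u32 trick for skipping a variable-length IP header: offoff 0 selects the first 16-bit word of the header, offmask 0x0f00 keeps the IHL field, and offshift 6 turns IHL (counted in 32-bit words) into a byte offset; the IPv6 entries instead jump a fixed 40 bytes. The struct stores offmask as cpu_to_be16() because that is how tc passes it. A worked example of the arithmetic, outside the driver:

        /* version 4, IHL 5 -> first 16-bit header word is 0x4500 */
        u16 word0 = (4 << 12) | (5 << 8);
        unsigned int next_hdr_off = (word0 & 0x0f00) >> 6;   /* 20 bytes */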
index 72b37a6..c486412 100644 (file)
@@ -503,40 +503,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 EXPORT_SYMBOL(cxgb4_select_ntuple);
 
 /*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head.  If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
-       struct sk_buff *skb;
-
-       while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
-               const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
-               spin_unlock(&e->lock);
-               if (cb->arp_err_handler)
-                       cb->arp_err_handler(cb->handle, skb);
-               else
-                       t4_ofld_send(adap, skb);
-               spin_lock(&e->lock);
-       }
-}
-
-/*
  * Called when the host's neighbor layer makes a change to some entry that is
  * loaded into the HW L2 table.
  */
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 {
-       struct l2t_entry *e;
-       struct sk_buff_head *arpq = NULL;
-       struct l2t_data *d = adap->l2t;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(d, addr, addr_len, ifidx);
+       int hash, ifidx = neigh->dev->ifindex;
+       struct sk_buff_head *arpq = NULL;
+       struct l2t_data *d = adap->l2t;
+       struct l2t_entry *e;
 
+       hash = addr_hash(d, addr, addr_len, ifidx);
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
                        write_l2e(adap, e, 0);
        }
 
-       if (arpq)
-               handle_failed_resolution(adap, e);
+       if (arpq) {
+               struct sk_buff *skb;
+
+               /* Called when address resolution fails for an L2T
+                * entry to handle packets on the arpq head. If a
+                * packet specifies a failure handler it is invoked,
+                * otherwise the packet is sent to the device.
+                */
+               while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+                       const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+                       spin_unlock(&e->lock);
+                       if (cb->arp_err_handler)
+                               cb->arp_err_handler(cb->handle, skb);
+                       else
+                               t4_ofld_send(adap, skb);
+                       spin_lock(&e->lock);
+               }
+       }
        spin_unlock_bh(&e->lock);
 }
 
@@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
 }
 
 /**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
  * @dev: net_device pointer
  * @vlan: VLAN Id
  * @port: Associated port
index fde93c5..a1b1446 100644 (file)
@@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
 /**
  * cxgb4_sched_class_free - free a scheduling class
  * @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
  *
  * Frees a scheduling class if there are no users.
  */
index 1359158..32a45dc 100644 (file)
@@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
 
 /**
  *     free_tx_desc - reclaims Tx descriptors and their buffers
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @q: the Tx queue to reclaim descriptors from
  *     @n: the number of descriptors to reclaim
  *     @unmap: whether the buffers should be unmapped for DMA
@@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
 /**
  *     is_eth_imm - can an Ethernet packet be sent as immediate data?
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns whether an Ethernet packet is small enough to fit as
  *     immediate data. Return value corresponds to headroom required.
@@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 /**
  *     calc_tx_flits - calculate the number of flits for a packet Tx WR
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of flits needed for a Tx WR for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 /**
  *     calc_tx_descs - calculate the number of Tx descriptors for a packet
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of Tx descriptors needed for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        qidx = skb_get_queue_mapping(skb);
        if (ptp_enabled) {
-               spin_lock(&adap->ptp_lock);
                if (!(adap->ptp_tx_skb)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        adap->ptp_tx_skb = skb_get(skb);
                } else {
-                       spin_unlock(&adap->ptp_lock);
                        goto out_free;
                }
                q = &adap->sge.ptptxq;
@@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
        ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
-       if (unlikely(ret == -ENOTSUPP)) {
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
+       if (unlikely(ret == -EOPNOTSUPP))
                goto out_free;
-       }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
        chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake!\n",
                        dev->name, qidx);
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                return NETDEV_TX_BUSY;
        }
 
@@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                goto out_free;
        }
 
@@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (iph->version == 4) {
                                iph->check = 0;
                                iph->tot_len = 0;
-                               iph->check = (u16)(~ip_fast_csum((u8 *)iph,
-                                                                iph->ihl));
+                               iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                cntrl = hwcsum(adap->params.chip, skb);
@@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        txq_advance(&q->q, ndesc);
 
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
-       if (ptp_enabled)
-               spin_unlock(&adap->ptp_lock);
        return NETDEV_TX_OK;
 
 out_free:
@@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(qid >= pi->nqsets))
                return cxgb4_ethofld_xmit(skb, dev);
 
+       if (is_ptp_enabled(skb, dev)) {
+               struct adapter *adap = netdev2adap(dev);
+               netdev_tx_t ret;
+
+               spin_lock(&adap->ptp_lock);
+               ret = cxgb4_eth_xmit(skb, dev);
+               spin_unlock(&adap->ptp_lock);
+               return ret;
+       }
+
        return cxgb4_eth_xmit(skb, dev);
 }
 
@@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
 
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
  *
  * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
  * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
@@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 
 /**
  *     txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- *     @adap: the adapter
  *     @q: the queue to stop
  *
  *     Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
@@ -3286,7 +3286,7 @@ enum {
 
 /**
  *     t4_systim_to_hwstamp - read hardware time stamp
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @skb: the packet
  *
  *     Read Time Stamp from MPS packet and insert in skb which
@@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
 
        hwtstamps = skb_hwtstamps(skb);
        memset(hwtstamps, 0, sizeof(*hwtstamps));
-       hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+       hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
 
        return RX_PTP_PKT_SUC;
 }
 
 /**
  *     t4_rx_hststamp - Recv PTP Event Message
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @rsp: the response queue descriptor holding the RX_PKT message
+ *     @rxq: the response queue holding the RX_PKT message
  *     @skb: the packet
  *
  *     PTP enabled and MPS packet, read HW timestamp
@@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
 
 /**
  *      t4_tx_hststamp - Loopback PTP Transmit Event Message
- *      @adap: the adapter
+ *      @adapter: the adapter
  *      @skb: the packet
  *      @dev: the ingress net device
  *
index 01c65d1..cbe72ed 100644 (file)
@@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e)
 }
 
 /**
+ * cxgb4_smt_release - Release SMT entry
  * @e: smt entry to release
  *
  * Releases ref count and frees up an smt entry from SMT table
@@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
 }
 
 /**
+ * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
  * @dev: net_device pointer
  * @smac: MAC address to add to SMT
  * Returns pointer to the SMT entry created
index 1c8068c..ad522f8 100644 (file)
@@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 
 /**
  *     t4_get_exprom_version - return the Expansion ROM version (if any)
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @vers: where to place the version
  *
  *     Reads the Expansion ROM header from FLASH and returns the version
@@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
        drv_fw = &fw_info->fw_hdr;
 
        /* Read the header of the firmware on the card */
-       ret = -t4_read_flash(adap, FLASH_FW_START,
+       ret = t4_read_flash(adap, FLASH_FW_START,
                            sizeof(*card_fw) / sizeof(uint32_t),
                            (uint32_t *)card_fw, 1);
        if (ret == 0) {
@@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                   should_install_fs_fw(adap, card_fw_usable,
                                        be32_to_cpu(fs_fw->fw_ver),
                                        be32_to_cpu(card_fw->fw_ver))) {
-               ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
-                                    fw_size, 0);
+               ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+                                   fw_size, 0);
                if (ret != 0) {
                        dev_err(adap->pdev_dev,
                                "failed to install firmware: %d\n", ret);
@@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                        FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
                        FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
                        FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
-               ret = EINVAL;
+               ret = -EINVAL;
                goto bye;
        }
 
@@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap)
  * @cmd: TP fw ldst address space type
  * @vals: where the indirect register values are stored/written
  * @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
  * @rw: Read (1) or Write (0)
  * @sleep_ok: if true we may sleep while awaiting command completion
  *
@@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 
 /**
  *     compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @pidx: the port index
  *
  *     Computes and returns a bitmap indicating which MPS buffer groups are
@@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
 
 /**
  *     t4_get_tp_ch_map - return TP ingress channels associated with a port
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @pidx: the port index
  *
  *     Returns a bitmap indicating which TP Ingress Channels are associated
@@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  *     @phy_addr: the PHY address
  *     @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  *     @reg: the register to write
- *     @valp: value to write
+ *     @val: value to write
  *
  *     Issues a FW command through the given mailbox to write a PHY register.
  */
@@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 
 /**
  *     t4_sge_decode_idma_state - decode the idma state
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @state: the state idma is stuck in
  */
 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
- *      @ctx_type: Egress or Ingress
+ *      @ctxt_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
@@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 
 /**
  *     t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- *     @adap - the adapter
+ *     @adap: the adapter
  *     @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
  *     @dbqtimers: SGE Doorbell Queue Timer table
  *
@@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 /**
  *     t4_fw_restart - restart the firmware by taking the uP out of RESET
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @reset: if we want to do a RESET to restart things
  *
  *     Restart firmware previously halted by t4_fw_halt().  On successful
@@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *     @nmac: number of MAC addresses needed (1 to 5)
  *     @mac: the MAC addresses of the VI
  *     @rss_size: size of RSS table slice associated with this VI
+ *     @vivld: the destination to store the VI Valid value.
+ *     @vin: the destination to store the VIN value.
  *
  *     Allocates a virtual interface for the given physical port.  If @mac is
  *     not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
  *      @adap: the adapter
  *      @viid: the VI id
- *      @mac: the MAC address
+ *      @addr: the MAC address
  *      @mask: the mask
  *      @vni: the VNI id for the tunnel protocol
  *      @vni_mask: mask for the VNI id
@@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
  *     t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  *     @adap: the adapter
  *     @viid: the VI id
- *     @mac: the MAC address
+ *     @addr: the MAC address
  *     @mask: the mask
  *     @idx: index at which to add this entry
- *     @port_id: the port index
  *     @lookup_type: MAC address for inner (1) or outer (0) header
+ *     @port_id: the port index
  *     @sleep_ok: call is allowed to sleep
  *
  *     Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  *     @idx: index of existing filter for old value of MAC address, or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: the destination to store the new SMT index.
  *
  *     Modifies an exact-match filter and sets it to the new MAC address.
  *     Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 
 /**
  *     t4_link_down_rc_str - return a string for a Link Down Reason Code
- *     @adap: the adapter
  *     @link_down_rc: Link Down Reason Code
  *
  *     Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
        return reason[link_down_rc];
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -9110,7 +9110,6 @@ found:
 /**
  *     t4_prep_adapter - prepare SW and HW for operation
  *     @adapter: the adapter
- *     @reset: if true perform a HW reset
  *
  *     Initialize adapter SW state for the various HW modules, set initial
  *     values for some adapter tunables, take PHYs out of reset, and
@@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
 /**
  *     t4_i2c_rd - read I2C data from adapter
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @port: Port number if per-port device; <0 if not
  *     @devid: per-port device ID or absolute device ID
  *     @offset: byte offset into device I2C space
@@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
 
 /**
  *      t4_set_vlan_acl - Set a VLAN id for the specified VF
- *      @adapter: the adapter
+ *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
  *      @vf: one of the VFs instantiated by the specified PF
  *      @vlan: The vlanid to be set
index cec865a..a7641be 100644 (file)
@@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi)
  *     @tcam_idx: TCAM index of existing filter for old value of MAC address,
  *                or -1
  *     @addr: the new MAC address value
- *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @persistent: whether a new MAC allocation should be persistent
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
index f71c973..8c3d6e1 100644 (file)
@@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc,
  *     restore_rx_bufs - put back a packet's RX buffers
  *     @gl: the packet gather list
  *     @fl: the SGE Free List
- *     @nfrags: how many fragments in @si
+ *     @frags: how many fragments in @si
  *
  *     Called when we find out that the current packet, @si, can't be
  *     processed right away for some reason.  This is a very rare event and
@@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
 
 /**
  *     sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- *     @data: the adapter
+ *     @t: Rx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE RX queues.
  *
@@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 
 /**
  *     sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- *     @data: the adapter
+ *     @t: Tx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE TX queues.
  *
@@ -2405,6 +2405,7 @@ err:
  *     t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
  *     @adapter: the adapter
  *     @txq: pointer to the new txq to be filled in
+ *     @dev: the network device
  *     @devq: the network TX queue associated with the new txq
  *     @iqid: the relative ingress queue ID to which events relating to
  *             the new txq should be directed
index 9d49ff2..a31b873 100644 (file)
@@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
        return cc_fec;
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  *     @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  *     @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  *             -1 no change
+ *     @sleep_ok: call is allowed to sleep
  *
  *     Sets Rx properties of a virtual interface.
  */
@@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 /**
  *     t4vf_handle_get_port_info - process a FW reply message
  *     @pi: the port info
- *     @rpl: start of the FW message
+ *     @cmd: start of the FW message
  *
  *     Processes a GET_PORT_INFO FW reply message.
  */
@@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
        return 0;
 }
 
-/**
- */
 int t4vf_prep_adapter(struct adapter *adapter)
 {
        int err;
index 298c557..22105d0 100644 (file)
@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
        /* disable interrupts */
        enetc_wr_reg(v->rbier, 0);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
 
        napi_schedule_irqoff(&v->napi);
@@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
        /* enable interrupts */
        enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
                             ENETC_TBIER_TXTIE);
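
The two enetc hunks above widen the for_each_set_bit() bound from the number of active TX rings to ENETC_MAX_NUM_TXQS. The size argument of for_each_set_bit() is the declared bitmap width in bits, not the count of in-use entries, so passing the active-ring count can silently skip set bits. A minimal hedged sketch of the pattern (the names below are invented, not the driver's):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

#define MAX_TXQS 8	/* declared width of the ring bitmap, in bits */

/* Walk every ring whose bit is set, regardless of how many are active. */
static void mask_tx_interrupts(unsigned long *tx_rings_map, u32 __iomem *tbier)
{
	int i;

	for_each_set_bit(i, tx_rings_map, MAX_TXQS)
		writel(0, &tbier[i]);
}
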
 
@@ -1595,6 +1595,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en)
        return 0;
 }
 
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_rx_rings; i++)
+               enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_tx_rings; i++)
+               enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+}
+
 int enetc_set_features(struct net_device *ndev,
                       netdev_features_t features)
 {
@@ -1604,6 +1622,14 @@ int enetc_set_features(struct net_device *ndev,
        if (changed & NETIF_F_RXHASH)
                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
 
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+               enetc_enable_rxvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+               enetc_enable_txvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
        if (changed & NETIF_F_HW_TC)
                err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
 
index 6314051..ce0d321 100644 (file)
@@ -531,22 +531,22 @@ struct enetc_msg_cmd_header {
 
 /* Common H/W utility functions */
 
-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+       u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
 
        val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
-       enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+       enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
 }
 
-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+       u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
 
        val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
-       enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+       enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
 }
 
 static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
index 824d211..4fac57d 100644 (file)
@@ -649,14 +649,6 @@ static int enetc_pf_set_features(struct net_device *ndev,
        netdev_features_t changed = ndev->features ^ features;
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
-               enetc_enable_rxvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
-       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
-               enetc_enable_txvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
                struct enetc_pf *pf = enetc_si_priv(priv->si);
 
index a6cdd5b..d8d76da 100644 (file)
@@ -525,11 +525,6 @@ struct fec_enet_private {
        unsigned int total_tx_ring_size;
        unsigned int total_rx_ring_size;
 
-       unsigned long work_tx;
-       unsigned long work_rx;
-       unsigned long work_ts;
-       unsigned long work_mdio;
-
        struct  platform_device *pdev;
 
        int     dev_id;
index 2d0d313..3982285 100644 (file)
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME    "fec"
 
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE   (1 << 5)
 #define FEC_ENET_RSEM_V        0x84
@@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
        fep = netdev_priv(ndev);
 
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
        txq = fep->tx_queue[queue_id];
        /* get next bdp of dirty_tx */
        nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1340,17 +1336,14 @@ skb_done:
                writel(0, txq->bd.reg_desc_active);
 }
 
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       u16 queue_id;
-       /* First process class A queue, then Class B and Best Effort queue */
-       for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
-               clear_bit(queue_id, &fep->work_tx);
-               fec_enet_tx_queue(ndev, queue_id);
-       }
-       return;
+       int i;
+
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_tx_queues - 1; i >= 0; i--)
+               fec_enet_tx_queue(ndev, i);
 }
 
 static int
@@ -1426,7 +1419,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 #ifdef CONFIG_M532x
        flush_cache_all();
 #endif
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
        rxq = fep->rx_queue[queue_id];
 
        /* First, grab all of the stats for the incoming packet.
@@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                                               htons(ETH_P_8021Q),
                                               vlan_tag);
 
+               skb_record_rx_queue(skb, queue_id);
                napi_gro_receive(&fep->napi, skb);
 
                if (is_copybreak) {
@@ -1595,48 +1588,30 @@ rx_processing_done:
        return pkt_received;
 }
 
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
 {
-       int     pkt_received = 0;
-       u16     queue_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int i, done = 0;
 
-       for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-               int ret;
-
-               ret = fec_enet_rx_queue(ndev,
-                                       budget - pkt_received, queue_id);
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_rx_queues - 1; i >= 0; i--)
+               done += fec_enet_rx_queue(ndev, budget - done, i);
 
-               if (ret < budget - pkt_received)
-                       clear_bit(queue_id, &fep->work_rx);
-
-               pkt_received += ret;
-       }
-       return pkt_received;
+       return done;
 }
 
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
 {
-       if (int_events == 0)
-               return false;
+       uint int_events;
+
+       int_events = readl(fep->hwp + FEC_IEVENT);
 
-       if (int_events & FEC_ENET_RXF_0)
-               fep->work_rx |= (1 << 2);
-       if (int_events & FEC_ENET_RXF_1)
-               fep->work_rx |= (1 << 0);
-       if (int_events & FEC_ENET_RXF_2)
-               fep->work_rx |= (1 << 1);
+       /* Don't clear MDIO events, we poll for those */
+       int_events &= ~FEC_ENET_MII;
 
-       if (int_events & FEC_ENET_TXF_0)
-               fep->work_tx |= (1 << 2);
-       if (int_events & FEC_ENET_TXF_1)
-               fep->work_tx |= (1 << 0);
-       if (int_events & FEC_ENET_TXF_2)
-               fep->work_tx |= (1 << 1);
+       writel(int_events, fep->hwp + FEC_IEVENT);
 
-       return true;
+       return int_events != 0;
 }
 
 static irqreturn_t
@@ -1644,18 +1619,9 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       int_events = readl(fep->hwp + FEC_IEVENT);
-
-       /* Don't clear MDIO events, we poll for those */
-       int_events &= ~FEC_ENET_MII;
-
-       writel(int_events, fep->hwp + FEC_IEVENT);
-       fec_enet_collect_events(fep, int_events);
-
-       if ((fep->work_tx || fep->work_rx) && fep->link) {
+       if (fec_enet_collect_events(fep) && fep->link) {
                ret = IRQ_HANDLED;
 
                if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1638,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int pkts;
+       int done = 0;
 
-       pkts = fec_enet_rx(ndev, budget);
-
-       fec_enet_tx(ndev);
+       do {
+               done += fec_enet_rx(ndev, budget - done);
+               fec_enet_tx(ndev);
+       } while ((done < budget) && fec_enet_collect_events(fep));
 
-       if (pkts < budget) {
-               napi_complete_done(napi, pkts);
+       if (done < budget) {
+               napi_complete_done(napi, done);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
-       return pkts;
+
+       return done;
 }
 
 /* ------------------------------------------------------------------------- */
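
The reworked fec poll above follows the common NAPI pattern of re-reading the event register before completing, so events that arrive while interrupts are masked are not lost. A generic sketch of that pattern, with placeholder types and stubbed helpers standing in for the driver's RX/TX cleaners (none of these names come from the patch):

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

/* Placeholder helpers standing in for the driver's cleanup routines. */
static int example_clean_rx(struct example_priv *priv, int budget) { return 0; }
static void example_clean_tx(struct example_priv *priv) { }
static bool example_pending_events(struct example_priv *priv) { return false; }
static void example_unmask_irqs(struct example_priv *priv) { }

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = 0;

	/* Keep cleaning while budget remains and new events keep arriving. */
	do {
		done += example_clean_rx(priv, budget - done);
		example_clean_tx(priv);
	} while (done < budget && example_pending_events(priv));

	/* Re-arm interrupts only when the budget was not exhausted. */
	if (done < budget) {
		napi_complete_done(napi, done);
		example_unmask_irqs(priv);
	}

	return done;
}
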
index c117074..23f278e 100644 (file)
@@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
        struct net_device *ndev = ring_data->napi.dev;
 
        skb->protocol = eth_type_trans(skb, ndev);
-       (void)napi_gro_receive(&ring_data->napi, skb);
+       napi_gro_receive(&ring_data->napi, skb);
 }
 
 static int hns_desc_unused(struct hnae_ring *ring)
index b14f2ab..c38f3bb 100644 (file)
@@ -4127,9 +4127,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
        hns3_put_ring_config(priv);
 
-       hns3_dbg_uninit(handle);
-
 out_netdev_free:
+       hns3_dbg_uninit(handle);
        free_netdev(netdev);
 }
 
index 6b1545f..2622e04 100644 (file)
@@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
 {
        struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
        unsigned char *packet = skb->data;
+       u32 len = skb_headlen(skb);
        u32 i;
 
-       for (i = 0; i < skb->len; i++)
+       len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+       for (i = 0; i < len; i++)
                if (packet[i] != (unsigned char)(i & 0xff))
                        break;
 
        /* The packet is correctly received */
-       if (i == skb->len)
+       if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
                tqp_vector->rx_group.total_packets++;
        else
                print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
-                              skb->data, skb->len, true);
+                              skb->data, len, true);
 
        dev_kfree_skb_any(skb);
 }
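
The selftest fix above only walks the linear part of the skb (skb_headlen()) and clamps it to the expected test-packet size, since skb->len can cover paged fragments that skb->data does not. A minimal sketch of that bounds check (constant and function names are invented):

#include <linux/kernel.h>
#include <linux/skbuff.h>

#define EXAMPLE_LB_TEST_PACKET_SIZE 128

/* Verify an incrementing-byte test pattern in the skb's linear data only. */
static bool example_lb_pattern_ok(const struct sk_buff *skb)
{
	u32 len = min_t(u32, skb_headlen(skb), EXAMPLE_LB_TEST_PACKET_SIZE);
	u32 i;

	for (i = 0; i < len; i++)
		if (skb->data[i] != (u8)(i & 0xff))
			return false;

	return len == EXAMPLE_LB_TEST_PACKET_SIZE;
}
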
index 96bfad5..d6bfdc6 100644 (file)
@@ -9859,7 +9859,7 @@ retry:
        set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        hdev->reset_type = HNAE3_FLR_RESET;
        ret = hclge_reset_prepare(hdev);
-       if (ret) {
+       if (ret || hdev->reset_pending) {
                dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
                        ret);
                if (hdev->reset_pending ||
index 1b9578d..a10b022 100644 (file)
@@ -1793,6 +1793,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
        if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
                hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
                ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "failed to assert VF reset, ret = %d\n", ret);
+                       return ret;
+               }
                hdev->rst_stats.vf_func_rst_cnt++;
        }
 
index 0245da0..b735bc5 100644 (file)
@@ -814,6 +814,8 @@ err_aeqs_init:
 err_init_msix:
 err_pfhwdev_alloc:
        hinic_free_hwif(hwif);
+       if (err > 0)
+               err = -EIO;
        return ERR_PTR(err);
 }
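
The hinic hunk above converts a positive hardware status to -EIO before passing it to ERR_PTR(): the ERR_PTR()/IS_ERR() convention only recognizes values in the negative errno range. A small illustration (the function name is made up):

#include <linux/err.h>
#include <linux/errno.h>

static void *example_create(int hw_status)
{
	/* Hardware may report a positive status code, but callers use
	 * IS_ERR(), which only understands -MAX_ERRNO..-1.
	 */
	if (hw_status > 0)
		hw_status = -EIO;
	if (hw_status)
		return ERR_PTR(hw_status);

	return NULL;	/* success placeholder */
}
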
 
index c33eb11..e0f5a81 100644 (file)
@@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
                                MSG_NOT_RESP, timeout);
 }
 
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
-                                 struct hinic_recv_msg *recv_msg)
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
 {
-       struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-       struct pci_dev *pdev = hwif->pdev;
-       u8 *buf_out = recv_msg->buf_out;
+       struct hinic_mgmt_msg_handle_work *mgmt_work =
+               container_of(work, struct hinic_mgmt_msg_handle_work, work);
+       struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+       u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
        struct hinic_mgmt_cb *mgmt_cb;
        unsigned long cb_state;
        u16 out_size = 0;
 
-       if (recv_msg->mod >= HINIC_MOD_MAX) {
+       memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+       if (mgmt_work->mod >= HINIC_MOD_MAX) {
                dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
-                       recv_msg->mod);
+                       mgmt_work->mod);
+               kfree(mgmt_work->msg);
+               kfree(mgmt_work);
                return;
        }
 
-       mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
+       mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
 
        cb_state = cmpxchg(&mgmt_cb->state,
                           HINIC_MGMT_CB_ENABLED,
                           HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
 
        if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
-               mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
-                           recv_msg->msg, recv_msg->msg_len,
+               mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+                           mgmt_work->msg, mgmt_work->msg_len,
                            buf_out, &out_size);
        else
                dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
-                       recv_msg->mod, recv_msg->cmd);
+                       mgmt_work->mod, mgmt_work->cmd);
 
        mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
 
-       if (!recv_msg->async_mgmt_to_pf)
+       if (!mgmt_work->async_mgmt_to_pf)
                /* MGMT sent sync msg, send the response */
-               msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
+               msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
                                  buf_out, out_size, MGMT_RESP,
-                                 recv_msg->msg_id);
+                                 mgmt_work->msg_id);
+
+       kfree(mgmt_work->msg);
+       kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+                                 struct hinic_recv_msg *recv_msg)
+{
+       struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+
+       mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+       if (!mgmt_work) {
+               dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+               return;
+       }
+
+       if (recv_msg->msg_len) {
+               mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+               if (!mgmt_work->msg) {
+                       dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
+                       kfree(mgmt_work);
+                       return;
+               }
+       }
+
+       mgmt_work->pf_to_mgmt = pf_to_mgmt;
+       mgmt_work->msg_len = recv_msg->msg_len;
+       memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+       mgmt_work->msg_id = recv_msg->msg_id;
+       mgmt_work->mod = recv_msg->mod;
+       mgmt_work->cmd = recv_msg->cmd;
+       mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+       INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+       queue_work(pf_to_mgmt->workq, &mgmt_work->work);
 }
 
 /**
@@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
        if (!pf_to_mgmt->sync_msg_buf)
                return -ENOMEM;
 
+       pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+                                               MAX_PF_MGMT_BUF_SIZE,
+                                               GFP_KERNEL);
+       if (!pf_to_mgmt->mgmt_ack_buf)
+               return -ENOMEM;
+
        return 0;
 }
 
@@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
                return 0;
 
        sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+       pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+       if (!pf_to_mgmt->workq) {
+               dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+               return -ENOMEM;
+       }
        pf_to_mgmt->sync_msg_id = 0;
 
        err = alloc_msg_buf(pf_to_mgmt);
@@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
 
        hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
        hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+       destroy_workqueue(pf_to_mgmt->workq);
 }
index c2b142c..a824fbd 100644 (file)
@@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt {
        struct semaphore                sync_msg_lock;
        u16                             sync_msg_id;
        u8                              *sync_msg_buf;
+       void                            *mgmt_ack_buf;
 
        struct hinic_recv_msg           recv_resp_msg_from_mgmt;
        struct hinic_recv_msg           recv_msg_from_mgmt;
@@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt {
        struct hinic_api_cmd_chain      *cmd_chain[HINIC_API_CMD_MAX];
 
        struct hinic_mgmt_cb            mgmt_cb[HINIC_MOD_MAX];
+
+       struct workqueue_struct         *workq;
+};
+
+struct hinic_mgmt_msg_handle_work {
+       struct work_struct work;
+       struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+       void                    *msg;
+       u16                     msg_len;
+
+       enum hinic_mod_type     mod;
+       u8                      cmd;
+       u16                     msg_id;
+       int                     async_mgmt_to_pf;
 };
 
 void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
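
The hinic changes above move management-message handling out of the event path into a single-threaded workqueue: the message is copied into a heap-allocated work item, queued, and freed by the work handler. A stripped-down sketch of that deferral pattern (types and names here are illustrative, not the driver's):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_msg_work {
	struct work_struct work;
	void *msg;
	u16 msg_len;
};

static void example_msg_work_handler(struct work_struct *work)
{
	struct example_msg_work *w =
		container_of(work, struct example_msg_work, work);

	/* ... process w->msg / w->msg_len in process context ... */

	kfree(w->msg);
	kfree(w);
}

/* Called from the receive path; ownership of the copy passes to the work. */
static int example_defer_msg(struct workqueue_struct *wq,
			     const void *msg, u16 msg_len)
{
	struct example_msg_work *w;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return -ENOMEM;

	w->msg = kmemdup(msg, msg_len, GFP_KERNEL);
	if (!w->msg) {
		kfree(w);
		return -ENOMEM;
	}
	w->msg_len = msg_len;

	INIT_WORK(&w->work, example_msg_work_handler);
	queue_work(wq, &w->work);
	return 0;
}
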
index 96d36ae..c5c7326 100644 (file)
@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        }
 
        netdev->min_mtu = IBMVETH_MIN_MTU;
-       netdev->max_mtu = ETH_MAX_MTU;
+       netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
 
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
index 2baf7b3..0fd7eae 100644 (file)
@@ -1971,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        release_sub_crqs(adapter, 1);
                } else {
                        rc = ibmvnic_reset_crq(adapter);
-                       if (!rc)
+                       if (rc == H_CLOSED || rc == H_SUCCESS) {
                                rc = vio_enable_interrupts(adapter->vdev);
+                               if (rc)
+                                       netdev_err(adapter->netdev,
+                                                  "Reset failed to enable interrupts. rc=%d\n",
+                                                  rc);
+                       }
                }
 
                if (rc) {
                        netdev_err(adapter->netdev,
-                                  "Couldn't initialize crq. rc=%d\n", rc);
+                                  "Reset couldn't initialize crq. rc=%d\n", rc);
                        goto out;
                }
 
index aa8026b..67806b7 100644 (file)
@@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err)
                                goto rx_unwind;
+                       err = i40e_alloc_rx_bi(&rx_rings[i]);
+                       if (err)
+                               goto rx_unwind;
 
                        /* now allocate the Rx buffers to make sure the OS
                         * has enough memory, any failure here means abort
index 5d807c8..56ecd6c 100644 (file)
@@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                i40e_get_netdev_stats_struct_tx(ring, stats);
 
                if (i40e_enabled_xdp_vsi(vsi)) {
-                       ring++;
+                       ring = READ_ONCE(vsi->xdp_rings[i]);
+                       if (!ring)
+                               continue;
                        i40e_get_netdev_stats_struct_tx(ring, stats);
                }
 
-               ring++;
+               ring = READ_ONCE(vsi->rx_rings[i]);
+               if (!ring)
+                       continue;
                do {
                        start   = u64_stats_fetch_begin_irq(&ring->syncp);
                        packets = ring->stats.packets;
@@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = READ_ONCE(vsi->tx_rings[q]);
+               if (!p)
+                       continue;
 
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;
 
-               /* Rx queue is part of the same block as Tx queue */
-               p = &p[1];
+               /* locate Rx ring */
+               p = READ_ONCE(vsi->rx_rings[q]);
+               if (!p)
+                       continue;
+
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
@@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
        if (vsi->tx_rings && vsi->tx_rings[0]) {
                for (i = 0; i < vsi->alloc_queue_pairs; i++) {
                        kfree_rcu(vsi->tx_rings[i], rcu);
-                       vsi->tx_rings[i] = NULL;
-                       vsi->rx_rings[i] = NULL;
+                       WRITE_ONCE(vsi->tx_rings[i], NULL);
+                       WRITE_ONCE(vsi->rx_rings[i], NULL);
                        if (vsi->xdp_rings)
-                               vsi->xdp_rings[i] = NULL;
+                               WRITE_ONCE(vsi->xdp_rings[i], NULL);
                }
        }
 }
@@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                ring->itr_setting = pf->tx_itr_default;
-               vsi->tx_rings[i] = ring++;
+               WRITE_ONCE(vsi->tx_rings[i], ring++);
 
                if (!i40e_enabled_xdp_vsi(vsi))
                        goto setup_rx;
@@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                set_ring_xdp(ring);
                ring->itr_setting = pf->tx_itr_default;
-               vsi->xdp_rings[i] = ring++;
+               WRITE_ONCE(vsi->xdp_rings[i], ring++);
 
 setup_rx:
                ring->queue_index = i;
@@ -10892,7 +10901,7 @@ setup_rx:
                ring->size = 0;
                ring->dcb_tc = 0;
                ring->itr_setting = pf->rx_itr_default;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
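
The i40e/ice/ixgbe hunks in this series wrap ring-pointer loads and stores in READ_ONCE()/WRITE_ONCE() and tolerate NULL, because the stats path can walk the ring arrays while another context is allocating or freeing them. A condensed sketch of the reader side under those assumptions (struct and field names are placeholders):

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_ring {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct example_vsi {
	int num_queues;
	struct example_ring **tx_rings;	/* written with WRITE_ONCE() elsewhere */
};

static void example_sum_tx_stats(struct example_vsi *vsi, u64 *pkts, u64 *bytes)
{
	unsigned int start;
	int i;

	*pkts = 0;
	*bytes = 0;

	for (i = 0; i < vsi->num_queues; i++) {
		/* Pairs with WRITE_ONCE() in the alloc/free paths; the ring
		 * may be torn down concurrently, so NULL must be tolerated.
		 */
		struct example_ring *ring = READ_ONCE(vsi->tx_rings[i]);
		u64 p, b;

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			p = ring->packets;
			b = ring->bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		*pkts += p;
		*bytes += b;
	}
}
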
index 28b46cc..2e3a39c 100644 (file)
@@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_txq; i++) {
                        if (vsi->tx_rings[i]) {
                                kfree_rcu(vsi->tx_rings[i], rcu);
-                               vsi->tx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->tx_rings[i], NULL);
                        }
                }
        }
@@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_rxq; i++) {
                        if (vsi->rx_rings[i]) {
                                kfree_rcu(vsi->rx_rings[i], rcu);
-                               vsi->rx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->rx_rings[i], NULL);
                        }
                }
        }
@@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->vsi = vsi;
                ring->dev = dev;
                ring->count = vsi->num_tx_desc;
-               vsi->tx_rings[i] = ring;
+               WRITE_ONCE(vsi->tx_rings[i], ring);
        }
 
        /* Allocate Rx rings */
@@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->netdev = vsi->netdev;
                ring->dev = dev;
                ring->count = vsi->num_rx_desc;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
index 082825e..4cbd49c 100644 (file)
@@ -1702,7 +1702,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                xdp_ring->netdev = NULL;
                xdp_ring->dev = dev;
                xdp_ring->count = vsi->num_tx_desc;
-               vsi->xdp_rings[i] = xdp_ring;
+               WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
                if (ice_setup_tx_ring(xdp_ring))
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
index fd9f5d4..2e35c57 100644 (file)
@@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = txr_idx;
 
                /* assign ring to adapter */
-               adapter->tx_ring[txr_idx] = ring;
+               WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
 
                /* update count and index */
                txr_count--;
@@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                set_ring_xdp(ring);
 
                /* assign ring to adapter */
-               adapter->xdp_ring[xdp_idx] = ring;
+               WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
 
                /* update count and index */
                xdp_count--;
@@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = rxr_idx;
 
                /* assign ring to adapter */
-               adapter->rx_ring[rxr_idx] = ring;
+               WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
 
                /* update count and index */
                rxr_count--;
@@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
                if (ring_is_xdp(ring))
-                       adapter->xdp_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
                else
-                       adapter->tx_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
        }
 
        ixgbe_for_each_ring(ring, q_vector->rx)
-               adapter->rx_ring[ring->queue_index] = NULL;
+               WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
 
        adapter->q_vector[v_idx] = NULL;
        napi_hash_del(&q_vector->napi);
index f162b8b..97a423e 100644 (file)
@@ -7051,7 +7051,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+               if (!rx_ring)
+                       continue;
                non_eop_descs += rx_ring->rx_stats.non_eop_descs;
                alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
                alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7072,15 +7075,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        packets = 0;
        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+               if (!tx_ring)
+                       continue;
                restart_queue += tx_ring->tx_stats.restart_queue;
                tx_busy += tx_ring->tx_stats.tx_busy;
                bytes += tx_ring->stats.bytes;
                packets += tx_ring->stats.packets;
        }
        for (i = 0; i < adapter->num_xdp_queues; i++) {
-               struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+               struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
 
+               if (!xdp_ring)
+                       continue;
                restart_queue += xdp_ring->tx_stats.restart_queue;
                tx_busy += xdp_ring->tx_stats.tx_busy;
                bytes += xdp_ring->stats.bytes;
index 946925b..7d5d9d3 100644 (file)
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+/* Only exists on Armada XP and Armada 370 */
 #define MVNETA_SERDES_CFG                       0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
 #define      MVNETA_QSGMII_SERDES_PROTO                 0x0667
+#define      MVNETA_HSGMII_SERDES_PROTO                 0x1107
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -3529,26 +3531,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
        return 0;
 }
 
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
 {
        int ret;
 
-       if (!pp->comphy)
-               return 0;
-
-       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
-                              pp->phy_interface);
+       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
        if (ret)
                return ret;
 
        return phy_power_on(pp->comphy);
 }
 
+static int mvneta_config_interface(struct mvneta_port *pp,
+                                  phy_interface_t interface)
+{
+       int ret = 0;
+
+       if (pp->comphy) {
+               if (interface == PHY_INTERFACE_MODE_SGMII ||
+                   interface == PHY_INTERFACE_MODE_1000BASEX ||
+                   interface == PHY_INTERFACE_MODE_2500BASEX) {
+                       ret = mvneta_comphy_init(pp, interface);
+               }
+       } else {
+               switch (interface) {
+               case PHY_INTERFACE_MODE_QSGMII:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_QSGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_SGMII:
+               case PHY_INTERFACE_MODE_1000BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_SGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_2500BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_HSGMII_SERDES_PROTO);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       pp->phy_interface = interface;
+
+       return ret;
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        int cpu;
 
-       WARN_ON(mvneta_comphy_init(pp));
+       WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
 
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3923,17 +3959,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
        /* When at 2.5G, the link partner can send frames with shortened
         * preambles.
         */
-       if (state->speed == SPEED_2500)
+       if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
                new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
 
-       if (pp->comphy && pp->phy_interface != state->interface &&
-           (state->interface == PHY_INTERFACE_MODE_SGMII ||
-            state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-            state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
-               pp->phy_interface = state->interface;
-
-               WARN_ON(phy_power_off(pp->comphy));
-               WARN_ON(mvneta_comphy_init(pp));
+       if (pp->phy_interface != state->interface) {
+               if (pp->comphy)
+                       WARN_ON(phy_power_off(pp->comphy));
+               WARN_ON(mvneta_config_interface(pp, state->interface));
        }
 
        if (new_ctrl0 != gmac_ctrl0)
@@ -4982,12 +5014,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-       if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-       else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
-                phy_interface_mode_is_8023z(phy_mode))
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-       else if (!phy_interface_mode_is_rgmii(phy_mode))
+       if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+           phy_mode != PHY_INTERFACE_MODE_SGMII &&
+           !phy_interface_mode_is_8023z(phy_mode) &&
+           !phy_interface_mode_is_rgmii(phy_mode))
                return -EINVAL;
 
        return 0;
@@ -5176,10 +5206,10 @@ static int mvneta_probe(struct platform_device *pdev)
        if (err < 0)
                goto err_netdev;
 
-       err = mvneta_port_power_up(pp, phy_mode);
+       err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               goto err_netdev;
+               return err;
        }
 
        /* Armada3700 network controller does not support per-cpu
index 241f007..fe54764 100644 (file)
@@ -203,7 +203,7 @@ io_error:
 
 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 {
-       u16 v;
+       u16 v = 0;
        __gm_phy_read(hw, port, reg, &v);
        return v;
 }
index 7be6b2d..9976de8 100644 (file)
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
        bool                       manual_buffer;
        u32                        cable_len;
        u32                        xoff;
+       u16                        port_buff_cell_sz;
 };
 
 #define MLX5E_MAX_DSCP (64)
index 2a8950b..3cf3e35 100644 (file)
@@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
        [MLX5E_400GAUI_8]                       = 400000,
 };
 
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+       struct mlx5e_port_eth_proto eproto;
+       int err;
+
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+               return true;
+
+       err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+       if (err)
+               return false;
+
+       return !!eproto.cap;
+}
+
 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
                                     const u32 **arr, u32 *size,
                                     bool force_legacy)
 {
-       bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
 
        *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
                      ARRAY_SIZE(mlx5e_link_speed);
@@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        bool ext;
        int err;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                goto out;
@@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        int err;
        int i;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                return err;
index a2ddd44..7a7defe 100644 (file)
@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
                               bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
index ae99fac..673f1c8 100644 (file)
@@ -34,6 +34,7 @@
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                            struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
        u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                port_buffer->buffer[i].epsb =
                        MLX5_GET(bufferx_reg, buffer, epsb);
                port_buffer->buffer[i].size =
-                       MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
                port_buffer->buffer[i].xon =
-                       MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
                port_buffer->buffer[i].xoff =
-                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
                total_used += port_buffer->buffer[i].size;
 
                mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
        }
 
        port_buffer->port_buffer_size =
-               MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+               MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
        port_buffer->spare_buffer_size =
                port_buffer->port_buffer_size - total_used;
 
@@ -88,9 +89,9 @@ out:
 static int port_set_buffer(struct mlx5e_priv *priv,
                           struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
-       void *buffer;
        void *in;
        int err;
        int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
                goto out;
 
        for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
-               buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
-
-               MLX5_SET(bufferx_reg, buffer, size,
-                        port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, lossy,
-                        port_buffer->buffer[i].lossy);
-               MLX5_SET(bufferx_reg, buffer, xoff_threshold,
-                        port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, xon_threshold,
-                        port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+               void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+               u64 size = port_buffer->buffer[i].size;
+               u64 xoff = port_buffer->buffer[i].xoff;
+               u64 xon = port_buffer->buffer[i].xon;
+
+               do_div(size, port_buff_cell_sz);
+               do_div(xoff, port_buff_cell_sz);
+               do_div(xon, port_buff_cell_sz);
+               MLX5_SET(bufferx_reg, buffer, size, size);
+               MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+               MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+               MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
        }
 
        err = mlx5e_port_set_pbmc(mdev, in);
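
Because the buffer cell size is now read from firmware rather than being a fixed power of two, the shift is replaced by a division; on 32-bit targets a 64-bit division must go through do_div(), which divides in place and returns the remainder. A small hedged example (values and the helper name are arbitrary):

#include <asm/div64.h>
#include <linux/types.h>

/* Convert a byte count to device buffer cells of runtime-determined size. */
static u32 bytes_to_cells(u64 bytes, u16 cell_sz)
{
	/* do_div() modifies its first argument in place and evaluates to
	 * bytes % cell_sz; afterwards 'bytes' holds the quotient.
	 */
	do_div(bytes, cell_sz);

	return (u32)bytes;
}
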
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int max_mtu)
+                                u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
 {
        int i;
 
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+                   (xoff + max_mtu + port_buff_cell_sz)) {
                        pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
                               i, port_buffer->buffer[i].size);
                        return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     @pfc_en: <input> current pfc configuration
  *     @buffer: <input> current prio to buffer mapping
  *     @xoff:   <input> xoff value
+ *     @port_buff_cell_sz: <input> port buffer cell_size
  *     @port_buffer: <output> port receive buffer configuration
  *     @change: <output>
  *
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int max_mtu,
-                              u8 pfc_en, u8 *buffer, u32 xoff,
+                              u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
 {
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
 
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 *buffer_size,
                                    u8 *prio2buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5e_port_buffer port_buffer;
        u32 xoff = calculate_xoff(priv, mtu);
        bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
                                          xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
index 34f55b8..80af7a5 100644 (file)
@@ -36,7 +36,6 @@
 #include "port.h"
 
 #define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
 
 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
index baa1624..c3d167f 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/rculist.h>
 #include <linux/rtnetlink.h>
 #include <linux/workqueue.h>
-#include <linux/rwlock.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <net/netevent.h>
index 8071312..eefeb1c 100644 (file)
@@ -407,7 +407,9 @@ static int
 mlx5e_rep_indr_setup_block(struct net_device *netdev,
                           struct mlx5e_rep_priv *rpriv,
                           struct flow_block_offload *f,
-                          flow_setup_cb_t *setup_cb)
+                          flow_setup_cb_t *setup_cb,
+                          void *data,
+                          void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5e_rep_indr_block_priv *indr_priv;
@@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-               block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
-                                              mlx5e_rep_indr_block_unbind);
+               block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+                                                   mlx5e_rep_indr_block_unbind,
+                                                   f, netdev, data, rpriv,
+                                                   cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
@@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -469,15 +473,19 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
 
 static
 int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_tc_cb);
+                                                 mlx5e_rep_indr_setup_tc_cb,
+                                                 data, cleanup);
        case TC_SETUP_FT:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_ft_cb);
+                                                 mlx5e_rep_indr_setup_ft_cb,
+                                                 data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
@@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
 {
        flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
-                                mlx5e_rep_indr_setup_tc_cb);
+                                mlx5e_rep_indr_block_unbind);
 }
 
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
index 4300255..aad1c29 100644 (file)
@@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
        struct mlx5_ct_entry *entry = ptr;
 
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+       kfree(entry);
 }
 
 static void
index bc102d0..d20243d 100644 (file)
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
        return 0;
 }
 
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+       u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+       if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+                                MLX5_REG_SBCAM, 0, 0))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
        if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
                priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
 
+       priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
        priv->dcbx.manual_buffer = false;
        priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
 
index ec5658b..c2464c3 100644 (file)
@@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
                                        struct ptys2ethtool_config **arr,
                                        u32 *size)
 {
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
        *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
@@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                           __func__, err);
                goto err_query_regs;
        }
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_proto_cap    = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        autoneg = link_ksettings->base.autoneg;
        speed = link_ksettings->base.speed;
 
-       ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext_supported = mlx5e_ptys_ext_supported(mdev);
        ext = ext_requested(autoneg, adver, ext_supported);
        if (!ext_supported && ext)
                return -EOPNOTSUPP;
index a836a02..081f150 100644 (file)
@@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev)
                mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
        mutex_unlock(&priv->state_lock);
 
-       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-               udp_tunnel_get_rx_info(netdev);
-
        return err;
 }
 
@@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_flow_steering;
 
+#ifdef CONFIG_MLX5_EN_ARFS
+       priv->netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
        return 0;
 
 err_destroy_flow_steering:
@@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(netdev))
                mlx5e_open(netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_get_rx_info(netdev);
        netif_device_attach(netdev);
        rtnl_unlock();
 }
@@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(priv->netdev))
                mlx5e_close(priv->netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_drop_rx_info(priv->netdev);
        netif_device_detach(priv->netdev);
        rtnl_unlock();
 
@@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
        /* netdev init */
        netif_carrier_off(netdev);
 
-#ifdef CONFIG_MLX5_EN_ARFS
-       netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(mdev);
-#endif
-
        return 0;
 
 err_free_cpumask:
index 7fc84f5..cc84121 100644 (file)
@@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
                                           struct mlx5e_rep_priv *rpriv)
 {
        /* Offloaded flow rule is allowed to duplicate on non-uplink representor
-        * sharing tc block with other slaves of a lag device.
+        * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+        * function is called from NIC mode.
         */
-       return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+       return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 
        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
-       rcu_read_unlock();
        if (flow) {
                /* Same flow rule offloaded to non-uplink representor sharing tc block,
                 * just return 0.
                 */
                if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
-                       goto out;
+                       goto rcu_unlock;
 
                NL_SET_ERR_MSG_MOD(extack,
                                   "flow cookie already exists, ignoring");
@@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                                 "flow cookie %lx already exists, ignoring\n",
                                 f->cookie);
                err = -EEXIST;
-               goto out;
+               goto rcu_unlock;
        }
+rcu_unlock:
+       rcu_read_unlock();
+       if (flow)
+               goto out;
 
        trace_mlx5e_configure_flower(f);
        err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
index 5dc335e..b68976b 100644 (file)
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
        }
 
        /* Create ingress allow rule */
-       memset(spec, 0, sizeof(*spec));
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
index 9f829e6..e4186e8 100644 (file)
@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
        return 0;
 }
 
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+                               u8 *module_id)
+{
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+       u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+       int err, status;
+       u8 *ptr;
+
+       MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+       MLX5_SET(mcia_reg, in, module, module_num);
+       MLX5_SET(mcia_reg, in, device_address, 0);
+       MLX5_SET(mcia_reg, in, page_number, 0);
+       MLX5_SET(mcia_reg, in, size, 1);
+       MLX5_SET(mcia_reg, in, l, 0);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_MCIA, 0, 0);
+       if (err)
+               return err;
+
+       status = MLX5_GET(mcia_reg, out, status);
+       if (status) {
+               mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+                             status);
+               return -EIO;
+       }
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+       *module_id = ptr[0];
+
+       return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
 {
        if (offset < MLX5_EEPROM_PAGE_LENGTH)
                /* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
                    MLX5_EEPROM_HIGH_PAGE_LENGTH);
 }
 
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
 {
        if (!page_num) /* Page 0 always start from low page */
                return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
        return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
 }
 
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = mlx5_qsfp_eeprom_page(*offset);
+       *offset -=  mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = 0;
+
+       if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+               return;
+
+       *i2c_addr = MLX5_I2C_ADDR_HIGH;
+       *offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                             u16 offset, u16 size, u8 *data)
 {
-       int module_num, page_num, status, err;
+       int module_num, status, err, page_num = 0;
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-       u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-       u16 i2c_addr;
-       void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+       u16 i2c_addr = 0;
+       u8 module_id;
+       void *ptr;
 
        err = mlx5_query_module_num(dev, &module_num);
        if (err)
                return err;
 
-       memset(in, 0, sizeof(in));
-       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-       /* Get the page number related to the given offset */
-       page_num = mlx5_eeprom_page(offset);
+       err = mlx5_query_module_id(dev, module_num, &module_id);
+       if (err)
+               return err;
 
-       /* Set the right offset according to the page number,
-        * For page_num > 0, relative offset is always >= 128 (high page).
-        */
-       offset -= mlx5_eeprom_high_page_offset(page_num);
+       switch (module_id) {
+       case MLX5_MODULE_ID_SFP:
+               mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       case MLX5_MODULE_ID_QSFP:
+       case MLX5_MODULE_ID_QSFP_PLUS:
+       case MLX5_MODULE_ID_QSFP28:
+               mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       default:
+               mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+               return -EINVAL;
+       }
 
        if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
                /* Cross pages read, read until offset 256 in low page */
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
-       i2c_addr = MLX5_I2C_ADDR_LOW;
+       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                return -EIO;
        }
 
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
        memcpy(data, ptr, size);
 
        return size;
index fd0e97d..c04ec1a 100644 (file)
@@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
        u16 num_pages;
        int err;
 
-       mutex_init(&mlxsw_pci->cmd.lock);
-       init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
        mlxsw_pci->core = mlxsw_core;
 
        mbox = mlxsw_cmd_mbox_alloc();
        if (!mbox)
                return -ENOMEM;
 
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
-       if (err)
-               goto mbox_put;
-
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       if (err)
-               goto err_out_mbox_alloc;
-
        err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
        if (err)
                goto err_sw_reset;
@@ -1537,9 +1526,6 @@ err_query_fw:
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
 err_alloc_irq:
 err_sw_reset:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 mbox_put:
        mlxsw_cmd_mbox_free(mbox);
        return err;
@@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv)
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 }
 
 static struct mlxsw_pci_queue *
@@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
        .features               = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
 };
 
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+       int err;
+
+       mutex_init(&mlxsw_pci->cmd.lock);
+       init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       if (err)
+               goto err_in_mbox_alloc;
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       if (err)
+               goto err_out_mbox_alloc;
+
+       return 0;
+
+err_out_mbox_alloc:
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+       return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        const char *driver_name = pdev->driver->name;
@@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mlxsw_pci->pdev = pdev;
        pci_set_drvdata(pdev, mlxsw_pci);
 
+       err = mlxsw_pci_cmd_init(mlxsw_pci);
+       if (err)
+               goto err_pci_cmd_init;
+
        mlxsw_pci->bus_info.device_kind = driver_name;
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 err_bus_device_register:
+       mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
        iounmap(mlxsw_pci->hw_addr);
 err_ioremap:
 err_pci_resource_len_check:
@@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
        mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+       mlxsw_pci_cmd_fini(mlxsw_pci);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
index 55af877..029ea34 100644 (file)
@@ -978,10 +978,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 
                lossy = !(pfc || pause_en);
                thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells);
+               thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
                delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
                                                        pfc, pause_en);
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells);
+               delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
                total_cells = thres_cells + delay_cells;
 
                taken_headroom_cells += total_cells;
index 6e87457..3abe3e7 100644 (file)
@@ -374,17 +374,15 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
        return NULL;
 }
 
-static inline void
+static inline u32
 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
-                                u16 *p_size)
+                                u32 size_cells)
 {
        /* Ports with eight lanes use two headroom buffers between which the
         * configured headroom size is split. Therefore, multiply the calculated
         * headroom size by two.
         */
-       if (mlxsw_sp_port->mapping.width != 8)
-               return;
-       *p_size *= 2;
+       return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
 }
 
 enum mlxsw_sp_flood_type {
index f25a8b0..6f84557 100644 (file)
@@ -312,7 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;
-               mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size);
+               size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
index 770de02..019ed50 100644 (file)
@@ -6262,7 +6262,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        }
 
        fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
-       if (WARN_ON(!fib_work))
+       if (!fib_work)
                return NOTIFY_BAD;
 
        fib_work->mlxsw_sp = router->mlxsw_sp;
index f843545..92351a7 100644 (file)
@@ -782,7 +782,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
                speed = 0;
 
        buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
-       mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize);
+       buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
        mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 }
index 628fa9b..3731651 100644 (file)
@@ -297,7 +297,7 @@ struct vxge_hw_fifo_config {
  * @greedy_return: If Set it forces the device to return absolutely all RxD
  *             that are consumed and still on board when a timer interrupt
  *             triggers. If Clear, then if the device has already returned
- *             RxD before current timer interrupt trigerred and after the
+ *             RxD before current timer interrupt triggered and after the
  *             previous timer interrupt triggered, then the device is not
  *             forced to returned the rest of the consumed RxD that it has
  *             on board which account for a byte count less than the one
index c393276..bb448c8 100644 (file)
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
        flush_work(&app_priv->cmsg_work);
 
        flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
-                                nfp_flower_setup_indr_block_cb);
+                                nfp_flower_setup_indr_tc_release);
 
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_cleanup(app);
index 6c3dc3b..7f54a62 100644 (file)
@@ -459,9 +459,10 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                 struct tc_cls_matchall_offload *flow);
 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                               enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
-                                  void *cb_priv);
+                               enum tc_setup_type type, void *type_data,
+                               void *data,
+                               void (*cleanup)(struct flow_block_cb *block_cb));
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
 
 void
 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
index 695d24b..d7340dc 100644 (file)
@@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        return NULL;
 }
 
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
-                                  void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+                                         void *type_data, void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
        struct flow_cls_offload *flower = type_data;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
        }
 }
 
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
 
@@ -1647,7 +1647,8 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv)
 
 static int
 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
-                              struct flow_block_offload *f)
+                              struct flow_block_offload *f, void *data,
+                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
@@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-               block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              nfp_flower_setup_indr_tc_release);
+               block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   nfp_flower_setup_indr_tc_release,
+                                                   f, netdev, data, app, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -1710,7 +1712,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 
 int
 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!nfp_fl_is_netdev_to_offload(netdev))
                return -EOPNOTSUPP;
@@ -1718,7 +1722,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
-                                                     type_data);
+                                                     type_data, data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
index 32b9d77..55cef5b 100644 (file)
@@ -147,7 +147,7 @@ struct pch_gbe_regs {
 #define PCH_GBE_RH_ALM_FULL_8   0x00001000      /* 8 words */
 #define PCH_GBE_RH_ALM_FULL_16  0x00002000      /* 16 words */
 #define PCH_GBE_RH_ALM_FULL_32  0x00003000      /* 32 words */
-/* RX FIFO Read Triger Threshold */
+/* RX FIFO Read Trigger Threshold */
 #define PCH_GBE_RH_RD_TRG_4     0x00000000      /* 4 words */
 #define PCH_GBE_RH_RD_TRG_8     0x00000200      /* 8 words */
 #define PCH_GBE_RH_RD_TRG_16    0x00000400      /* 16 words */
index f7e3ce3..e03ea9b 100644 (file)
@@ -468,12 +468,18 @@ static void ionic_get_ringparam(struct net_device *netdev,
        ring->rx_pending = lif->nrxq_descs;
 }
 
+static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_ringparam *ring = arg;
+
+       lif->ntxq_descs = ring->tx_pending;
+       lif->nrxq_descs = ring->rx_pending;
+}
+
 static int ionic_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
                netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -491,22 +497,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
            ring->rx_pending == lif->nrxq_descs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->ntxq_descs = ring->tx_pending;
-       lif->nrxq_descs = ring->rx_pending;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_ringsize, ring);
 }
 
 static void ionic_get_channels(struct net_device *netdev,
@@ -521,12 +512,17 @@ static void ionic_get_channels(struct net_device *netdev,
        ch->combined_count = lif->nxqs;
 }
 
+static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_channels *ch = arg;
+
+       lif->nxqs = ch->combined_count;
+}
+
 static int ionic_set_channels(struct net_device *netdev,
                              struct ethtool_channels *ch)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (!ch->combined_count || ch->other_count ||
            ch->rx_count || ch->tx_count)
@@ -535,21 +531,7 @@ static int ionic_set_channels(struct net_device *netdev,
        if (ch->combined_count == lif->nxqs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->nxqs = ch->combined_count;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_queuecount, ch);
 }
 
 static u32 ionic_get_priv_flags(struct net_device *netdev)
index 9d8c969..f49486b 100644 (file)
@@ -96,7 +96,8 @@ static void ionic_link_status_check(struct ionic_lif *lif)
        u16 link_status;
        bool link_up;
 
-       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
+           test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
                return;
 
        link_status = le16_to_cpu(lif->info->status.link_status);
@@ -1245,6 +1246,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
 
        netdev->hw_features |= netdev->hw_enc_features;
        netdev->features |= netdev->hw_features;
+       netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
 
        netdev->priv_flags |= IFF_UNICAST_FLT |
                              IFF_LIVE_ADDR_CHANGE;
@@ -1311,7 +1313,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
                return err;
 
        netdev->mtu = new_mtu;
-       err = ionic_reset_queues(lif);
+       err = ionic_reset_queues(lif, NULL, NULL);
 
        return err;
 }
@@ -1323,7 +1325,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
        netdev_info(lif->netdev, "Tx Timeout recovery\n");
 
        rtnl_lock();
-       ionic_reset_queues(lif);
+       ionic_reset_queues(lif, NULL, NULL);
        rtnl_unlock();
 }
 
@@ -1671,6 +1673,14 @@ int ionic_open(struct net_device *netdev)
        if (err)
                goto err_out;
 
+       err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
+       err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
        /* don't start the queues until we have link */
        if (netif_carrier_ok(netdev)) {
                err = ionic_start_queues(lif);
@@ -1692,15 +1702,15 @@ static void ionic_stop_queues(struct ionic_lif *lif)
        if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
                return;
 
-       ionic_txrx_disable(lif);
        netif_tx_disable(lif->netdev);
+       ionic_txrx_disable(lif);
 }
 
 int ionic_stop(struct net_device *netdev)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
 
-       if (!netif_device_present(netdev))
+       if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
                return 0;
 
        ionic_stop_queues(lif);
@@ -1978,24 +1988,32 @@ static const struct net_device_ops ionic_netdev_ops = {
        .ndo_get_vf_stats       = ionic_get_vf_stats,
 };
 
-int ionic_reset_queues(struct ionic_lif *lif)
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
 {
        bool running;
        int err = 0;
 
-       /* Put off the next watchdog timeout */
-       netif_trans_update(lif->netdev);
-
        err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
        if (err)
                return err;
 
        running = netif_running(lif->netdev);
-       if (running)
+       if (running) {
+               netif_device_detach(lif->netdev);
                err = ionic_stop(lif->netdev);
-       if (!err && running)
-               ionic_open(lif->netdev);
+               if (err)
+                       goto reset_out;
+       }
+
+       if (cb)
+               cb(lif, arg);
+
+       if (running) {
+               err = ionic_open(lif->netdev);
+               netif_device_attach(lif->netdev);
+       }
 
+reset_out:
        clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
 
        return err;
index c342803..ed126dd 100644 (file)
@@ -248,6 +248,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
        return (units * div) / mult;
 }
 
+typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
+
 void ionic_link_status_check_request(struct ionic_lif *lif);
 void ionic_get_stats64(struct net_device *netdev,
                       struct rtnl_link_stats64 *ns);
@@ -267,7 +269,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
 
 int ionic_open(struct net_device *netdev);
 int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif);
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
 
 static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
                                        struct ionic_txq_desc *desc, bool dbell)
index a49743d..6c2f9ff 100644 (file)
@@ -876,6 +876,8 @@ struct qed_dev {
        struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
        u8 engine_for_debug;
        bool disable_ilt_dump;
+       bool                            dbg_bin_dump;
+
        DECLARE_HASHTABLE(connections, 10);
        const struct firmware           *firmware;
 
index 7b76667..08ba9d5 100644 (file)
@@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }
 
-       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->vf_cids = vf_cids;
        iids->tids += vf_tids * p_mngr->vf_count;
 
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
        return p_blk;
 }
 
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+       u32 cli_idx, blk_idx;
+
+       for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+               for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+                       clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+               for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+                       clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+       }
+}
+
 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 
        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
+       /* Reset all ILT blocks at the beginning of ILT computing in order
+        * to prevent memory allocation for irrelevant blocks afterwards.
+        */
+       qed_cxt_ilt_blk_reset(p_hwfn);
+
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
index 57a0dab..3b9bbaf 100644 (file)
@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {
 
        /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
        "The filter/trigger constraint dword offsets are not enabled for recording",
-
+       /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+       "No matching framing mode",
 
        /* DBG_STATUS_VFC_READ_ERROR */
        "Error reading from VFC",
@@ -7505,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
        if (p_hwfn->cdev->print_dbg_data)
                qed_dbg_print_feature(text_buf, text_size_bytes);
 
+       /* Just return the original binary buffer if requested */
+       if (p_hwfn->cdev->dbg_bin_dump) {
+               vfree(text_buf);
+               return DBG_STATUS_OK;
+       }
+
        /* Free the old dump_buf and point the dump_buf to the newly allocated
         * and formatted text buffer.
         */
@@ -7732,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
 #define REGDUMP_HEADER_SIZE_SHIFT              0
 #define REGDUMP_HEADER_SIZE_MASK               0xffffff
 #define REGDUMP_HEADER_FEATURE_SHIFT           24
-#define REGDUMP_HEADER_FEATURE_MASK            0x3f
+#define REGDUMP_HEADER_FEATURE_MASK            0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT          29
+#define REGDUMP_HEADER_BIN_DUMP_MASK           0x1
 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT       30
 #define REGDUMP_HEADER_OMIT_ENGINE_MASK                0x1
 #define REGDUMP_HEADER_ENGINE_SHIFT            31
@@ -7770,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
                          feature, feature_size);
 
        SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+       SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
        SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
        SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
 
@@ -7793,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                omit_engine = 1;
 
        mutex_lock(&qed_dbg_lock);
+       cdev->dbg_bin_dump = true;
 
        org_engine = qed_get_debug_engine(cdev);
        for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7930,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
        }
 
+       /* Re-populate nvm attribute info */
+       qed_mcp_nvm_info_free(p_hwfn);
+       qed_mcp_nvm_info_populate(p_hwfn);
+
        /* nvm cfg1 */
        rc = qed_dbg_nvm_image(cdev,
                               (u8 *)buffer + offset +
@@ -7992,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
        }
 
+       cdev->dbg_bin_dump = false;
        mutex_unlock(&qed_dbg_lock);
 
        return 0;
index 1eebf30..9c26fde 100644 (file)
@@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev,
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
        union qed_llh_filter filter = {};
-       u8 filter_idx, abs_ppfid;
+       u8 filter_idx, abs_ppfid = 0;
        u32 high, low, ref_cnt;
        int rc = 0;
 
@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
 
 void qed_resc_free(struct qed_dev *cdev)
 {
+       struct qed_rdma_info *rdma_info;
+       struct qed_hwfn *p_hwfn;
        int i;
 
        if (IS_VF(cdev)) {
@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
        qed_llh_free(cdev);
 
        for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               p_hwfn = cdev->hwfns + i;
+               rdma_info = p_hwfn->p_rdma_info;
 
                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
                        qed_ooo_free(p_hwfn);
                }
 
-               if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+                       qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
                        qed_rdma_info_free(p_hwfn);
+               }
 
                qed_iov_free(p_hwfn);
                qed_l2_free(p_hwfn);
@@ -4467,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return 0;
 }
 
-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
-       kfree(p_hwfn->nvm_info.image_att);
-       p_hwfn->nvm_info.image_att = NULL;
-}
-
 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
@@ -4557,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
        return rc;
 err3:
        if (IS_LEAD_HWFN(p_hwfn))
-               qed_nvm_info_free(p_hwfn);
+               qed_mcp_nvm_info_free(p_hwfn);
 err2:
        if (IS_LEAD_HWFN(p_hwfn))
                qed_iov_free_hw_info(p_hwfn->cdev);
@@ -4618,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
                if (rc) {
                        if (IS_PF(cdev)) {
                                qed_init_free(p_hwfn);
-                               qed_nvm_info_free(p_hwfn);
+                               qed_mcp_nvm_info_free(p_hwfn);
                                qed_mcp_free(p_hwfn);
                                qed_hw_hwfn_free(p_hwfn);
                        }
@@ -4652,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev)
 
        qed_iov_free_hw_info(cdev);
 
-       qed_nvm_info_free(p_hwfn);
+       qed_mcp_nvm_info_free(p_hwfn);
 }
 
 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
index d2fe61a..5409a2d 100644 (file)
@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
        return qed_iwarp_ll2_stop(p_hwfn);
 }
 
index 9624616..0fd4520 100644 (file)
@@ -3280,6 +3280,13 @@ err0:
        return rc;
 }
 
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->nvm_info.image_att);
+       p_hwfn->nvm_info.image_att = NULL;
+       p_hwfn->nvm_info.valid = false;
+}
+
 int
 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          enum qed_nvm_images image_id,
index 5750b4c..12a705e 100644 (file)
@@ -1221,6 +1221,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
 
 /**
+ * @brief Delete nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
+/**
  * @brief Get the engine affinity configuration.
  *
  * @param p_hwfn
index 4566815..7271dd7 100644 (file)
@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
                        break;
                }
        }
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
index 856051f..adc2c8f 100644 (file)
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
        mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
 }
 
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS       90
+#define QED_VF_CHANNEL_USLEEP_DELAY            100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS       10
+#define QED_VF_CHANNEL_MSLEEP_DELAY            25
+
 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 {
        union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
        struct ustorm_trigger_vf_zone trigger;
        struct ustorm_vf_zone *zone_data;
-       int rc = 0, time = 100;
+       int iter, rc = 0;
 
        zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
 
@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
        REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
 
        /* When PF would be done with the response, it would write back to the
-        * `done' address. Poll until then.
+        * `done' address from a coherent DMA zone. Poll until then.
         */
-       while ((!*done) && time) {
-               msleep(25);
-               time--;
+
+       iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+               dma_rmb();
+       }
+
+       iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+               dma_rmb();
        }
 
        if (!*done) {
index 756c05e..29e2854 100644 (file)
@@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        /* PTP not supported on VFs */
        if (!is_vf)
-               qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+               qede_ptp_enable(edev);
 
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
@@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        if (system_state == SYSTEM_POWER_OFF)
                return;
        qed_ops->common->remove(cdev);
+       edev->cdev = NULL;
 
        /* Since this can happen out-of-sync with other flows,
         * don't release the netdevice until after slowpath stop
index 4c7f7a7..cd5841a 100644 (file)
@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        if (ptp->tx_skb) {
                dev_kfree_skb_any(ptp->tx_skb);
                ptp->tx_skb = NULL;
+               clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
        }
 
        /* Disable PTP in HW */
@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        edev->ptp = NULL;
 }
 
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
        /* Init work queue for Tx timestamping */
        INIT_WORK(&ptp->work, qede_ptp_task);
 
-       /* Init cyclecounter and timecounter. This is done only in the first
-        * load. If done in every load, PTP application will fail when doing
-        * unload / load (e.g. MTU change) while it is running.
-        */
-       if (init_tc) {
-               memset(&ptp->cc, 0, sizeof(ptp->cc));
-               ptp->cc.read = qede_ptp_read_cc;
-               ptp->cc.mask = CYCLECOUNTER_MASK(64);
-               ptp->cc.shift = 0;
-               ptp->cc.mult = 1;
-
-               timecounter_init(&ptp->tc, &ptp->cc,
-                                ktime_to_ns(ktime_get_real()));
-       }
+       /* Init cyclecounter and timecounter */
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = qede_ptp_read_cc;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
 
-       return rc;
+       timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+       return 0;
 }
 
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        edev->ptp = ptp;
 
-       rc = qede_ptp_init(edev, init_tc);
+       rc = qede_ptp_init(edev);
        if (rc)
                goto err1;
 
index 691a14c..89c7f3c 100644 (file)
@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
 void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
 
 static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
index 2d873ae..668ccc9 100644 (file)
@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
 
        qede_rdma_cleanup_event(edev);
        destroy_workqueue(edev->rdma_info.rdma_wq);
+       edev->rdma_info.rdma_wq = NULL;
 }
 
 int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
        if (edev->rdma_info.exp_recovery)
                return;
 
-       if (!edev->rdma_info.qedr_dev)
+       if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
                return;
 
        /* We don't want the cleanup flow to start while we're allocating and
index 40efe60..fcdecdd 100644 (file)
@@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
        return 0;
 }
 
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+                                     struct netlink_ext_ack *extack)
 {
        struct rmnet_port *port;
        int rc, entry;
 
        ASSERT_RTNL();
 
-       if (rmnet_is_real_dev_registered(real_dev))
+       if (rmnet_is_real_dev_registered(real_dev)) {
+               port = rmnet_get_port_rtnl(real_dev);
+               if (port->rmnet_mode != RMNET_EPMODE_VND) {
+                       NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+                       return -EINVAL;
+               }
+
                return 0;
+       }
 
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
@@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
        mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
 
-       err = rmnet_register_real_device(real_dev);
+       err = rmnet_register_real_device(real_dev, extack);
        if (err)
                goto err0;
 
@@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
        }
 
        if (port->rmnet_mode != RMNET_EPMODE_VND) {
-               NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+               NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
                return -EINVAL;
        }
 
@@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
                return -EBUSY;
        }
 
-       err = rmnet_register_real_device(slave_dev);
+       err = rmnet_register_real_device(slave_dev, extack);
        if (err)
                return -EBUSY;
 
index dad84ec..b660ddb 100644 (file)
@@ -2114,8 +2114,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
 void r8169_apply_firmware(struct rtl8169_private *tp)
 {
        /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
-       if (tp->rtl_fw)
+       if (tp->rtl_fw) {
                rtl_fw_write_firmware(tp, tp->rtl_fw);
+               /* At least one firmware doesn't reset tp->ocp_base. */
+               tp->ocp_base = OCP_STD_PHY_BASE;
+       }
 }
 
 static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
index 328bc38..0f366cc 100644 (file)
@@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 next:
-               if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
-                   xdp_result) {
+               if (skb)
+                       napi_gro_receive(&priv->napi, skb);
+               if (skb || xdp_result) {
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += xdp.data_end - xdp.data;
                }
index 7526658..4661ef8 100644 (file)
@@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
        geneve->collect_md = metadata;
        geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
        geneve->ttl_inherit = ttl_inherit;
+       geneve->df = df;
        geneve_unquiesce(geneve, gs4, gs6);
 
        return 0;
index 55226b2..ac7e5a0 100644 (file)
@@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
        int ret;
 
        state = gsi_channel_state(channel);
+
+       /* Channel could have entered STOPPED state since last call
+        * if it timed out.  If so, we're done.
+        */
+       if (state == GSI_CHANNEL_STATE_STOPPED)
+               return 0;
+
        if (state != GSI_CHANNEL_STATE_STARTED &&
            state != GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EINVAL;
@@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 {
        struct gsi_channel *channel = &gsi->channel[channel_id];
-       enum gsi_channel_state state;
        u32 retries;
        int ret;
 
        gsi_channel_freeze(channel);
 
-       /* Channel could have entered STOPPED state since last call if the
-        * STOP command timed out.  We won't stop a channel if stopping it
-        * was successful previously (so we still want the freeze above).
-        */
-       state = gsi_channel_state(channel);
-       if (state == GSI_CHANNEL_STATE_STOPPED)
-               return 0;
-
        /* RX channels might require a little time to enter STOPPED state */
        retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
 
index c9ab865..d92dd3f 100644 (file)
@@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void)
        return 4;
 }
 
+void ipa_cmd_tag_process(struct ipa *ipa)
+{
+       u32 count = ipa_cmd_tag_process_count();
+       struct gsi_trans *trans;
+
+       trans = ipa_cmd_trans_alloc(ipa, count);
+       if (trans) {
+               ipa_cmd_tag_process_add(trans);
+               gsi_trans_commit_wait(trans);
+       } else {
+               dev_err(&ipa->pdev->dev,
+                       "error allocating %u entry tag transaction\n", count);
+       }
+}
+
 static struct ipa_cmd_info *
 ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
 {
index e440aa6..1a646e0 100644 (file)
@@ -172,6 +172,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans);
 u32 ipa_cmd_tag_process_count(void);
 
 /**
+ * ipa_cmd_tag_process() - Perform a tag process
+ * @ipa:       IPA pointer
+ *
+ * Issue tag process commands in a transaction and wait for it to complete.
+ */
+void ipa_cmd_tag_process(struct ipa *ipa);
+
+/**
  * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
  * @ipa:       IPA pointer
  * @tre_count: Number of elements in the transaction
index 52d4b84..de2768d 100644 (file)
@@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
                .endpoint = {
                        .seq_type       = IPA_SEQ_INVALID,
                        .config = {
-                               .checksum       = true,
                                .aggregation    = true,
                                .status_enable  = true,
                                .rx = {
index 9f50d0d..9e58e49 100644 (file)
@@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa)
        if (ipa->modem_netdev)
                ipa_modem_suspend(ipa->modem_netdev);
 
+       ipa_cmd_tag_process(ipa);
+
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
 }
index dc4a5c2..d323adb 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/types.h>
 
+#include "ipa_gsi.h"
 #include "gsi_trans.h"
 #include "ipa.h"
 #include "ipa_endpoint.h"
index 3cf1860..0a40f3d 100644 (file)
@@ -8,7 +8,9 @@
 
 #include <linux/types.h>
 
+struct gsi;
 struct gsi_trans;
+struct ipa_gsi_endpoint_data;
 
 /**
  * ipa_gsi_trans_complete() - GSI transaction completion callback
index 03a1d0e..7341337 100644 (file)
@@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
                        sizeof_field(struct ipa_driver_init_complete_rsp,
                                     rsp),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_driver_init_complete_rsp,
+               .offset         = offsetof(struct ipa_driver_init_complete_rsp,
                                           rsp),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
                        sizeof_field(struct ipa_init_complete_ind,
                                     status),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_init_complete_ind,
+               .offset         = offsetof(struct ipa_init_complete_ind,
                                           status),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                        sizeof_field(struct ipa_init_modem_driver_req,
                                     platform_type_valid),
                .tlv_type       = 0x10,
-               .elem_size      = offsetof(struct ipa_init_modem_driver_req,
+               .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           platform_type_valid),
        },
        {
index e56547b..9159846 100644 (file)
@@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
                return err;
 
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macsec_netdev_addr_lock_key);
 
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
index 6a6cc9f..4942f61 100644 (file)
@@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macvlan_netdev_addr_lock_key);
 }
 
 static int macvlan_init(struct net_device *dev)
index f257023..e351d65 100644 (file)
@@ -480,8 +480,7 @@ config MICROCHIP_T1_PHY
 config MICROSEMI_PHY
        tristate "Microsemi PHYs"
        depends on MACSEC || MACSEC=n
-       select CRYPTO_AES
-       select CRYPTO_ECB
+       select CRYPTO_LIB_AES if MACSEC
        help
          Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
 
index b4d3dc4..d53ca88 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/phy.h>
 #include <dt-bindings/net/mscc-phy-vsc8531.h>
 
-#include <crypto/skcipher.h>
+#include <crypto/aes.h>
 
 #include <net/macsec.h>
 
@@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
 static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
                                     u16 key_len, u8 hkey[16])
 {
-       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-       struct skcipher_request *req = NULL;
-       struct scatterlist src, dst;
-       DECLARE_CRYPTO_WAIT(wait);
-       u32 input[4] = {0};
+       const u8 input[AES_BLOCK_SIZE] = {0};
+       struct crypto_aes_ctx ctx;
        int ret;
 
-       if (IS_ERR(tfm))
-               return PTR_ERR(tfm);
-
-       req = skcipher_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                     CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
-                                     &wait);
-       ret = crypto_skcipher_setkey(tfm, key, key_len);
-       if (ret < 0)
-               goto out;
-
-       sg_init_one(&src, input, 16);
-       sg_init_one(&dst, hkey, 16);
-       skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
-       ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       ret = aes_expandkey(&ctx, key, key_len);
+       if (ret)
+               return ret;
 
-out:
-       skcipher_request_free(req);
-       crypto_free_skcipher(tfm);
-       return ret;
+       aes_encrypt(&ctx, hkey, input);
+       memzero_explicit(&ctx, sizeof(ctx));
+       return 0;
 }
 
 static int vsc8584_macsec_transformation(struct phy_device *phydev,
index 1de3938..56cfae9 100644 (file)
@@ -840,7 +840,7 @@ static void phy_error(struct phy_device *phydev)
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-static int phy_disable_interrupts(struct phy_device *phydev)
+int phy_disable_interrupts(struct phy_device *phydev)
 {
        int err;
 
index 04946de..b4978c5 100644 (file)
@@ -794,8 +794,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 
        /* Grab the bits from PHYIR2, and put them in the lower half */
        phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-       if (phy_reg < 0)
-               return -EIO;
+       if (phy_reg < 0) {
+               /* returning -ENODEV doesn't stop bus scanning */
+               return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+       }
 
        *phy_id |= phy_reg;
 
@@ -1090,6 +1092,10 @@ int phy_init_hw(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
+       ret = phy_disable_interrupts(phydev);
+       if (ret)
+               return ret;
+
        if (phydev->drv->config_init)
                ret = phydev->drv->config_init(phydev);
 
index 0ab65fb..3b7c70e 100644 (file)
@@ -1463,6 +1463,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
                                   struct ethtool_pauseparam *pause)
 {
        struct phylink_link_state *config = &pl->link_config;
+       bool manual_changed;
+       int pause_state;
 
        ASSERT_RTNL();
 
@@ -1477,15 +1479,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
            !pause->autoneg && pause->rx_pause != pause->tx_pause)
                return -EINVAL;
 
-       mutex_lock(&pl->state_mutex);
-       config->pause = 0;
+       pause_state = 0;
        if (pause->autoneg)
-               config->pause |= MLO_PAUSE_AN;
+               pause_state |= MLO_PAUSE_AN;
        if (pause->rx_pause)
-               config->pause |= MLO_PAUSE_RX;
+               pause_state |= MLO_PAUSE_RX;
        if (pause->tx_pause)
-               config->pause |= MLO_PAUSE_TX;
+               pause_state |= MLO_PAUSE_TX;
 
+       mutex_lock(&pl->state_mutex);
        /*
         * See the comments for linkmode_set_pause(), wrt the deficiencies
         * with the current implementation.  A solution to this issue would
@@ -1502,18 +1504,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
        linkmode_set_pause(config->advertising, pause->tx_pause,
                           pause->rx_pause);
 
-       /* If we have a PHY, phylib will call our link state function if the
-        * mode has changed, which will trigger a resolve and update the MAC
-        * configuration.
+       manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
+                        (!(pause_state & MLO_PAUSE_AN) &&
+                          (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
+
+       config->pause = pause_state;
+
+       if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED,
+                                    &pl->phylink_disable_state))
+               phylink_pcs_config(pl, true, &pl->link_config);
+
+       mutex_unlock(&pl->state_mutex);
+
+       /* If we have a PHY, a change of the pause frame advertisement will
+        * cause phylib to renegotiate (if AN is enabled) which will in turn
+        * call our phylink_phy_change() and trigger a resolve.  Note that
+        * we can't hold our state mutex while calling phy_set_asym_pause().
         */
-       if (pl->phydev) {
+       if (pl->phydev)
                phy_set_asym_pause(pl->phydev, pause->rx_pause,
                                   pause->tx_pause);
-       } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
-                            &pl->phylink_disable_state)) {
-               phylink_pcs_config(pl, true, &pl->link_config);
+
+       /* If the manual pause settings changed, make sure we trigger a
+        * resolve to update their state; we can not guarantee that the
+        * link will cycle.
+        */
+       if (manual_changed) {
+               pl->mac_link_dropped = true;
+               phylink_run_resolve(pl);
        }
-       mutex_unlock(&pl->state_mutex);
 
        return 0;
 }
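
As a quick illustration of the manual_changed test added above (a user-space sketch, not the phylink code; the MLO_* values are invented placeholders): a forced resolve is needed when the pause autoneg bit toggles, or when autoneg is off and the manually requested RX/TX pause bits differ from the stored state.

#include <stdbool.h>
#include <stdio.h>

#define MLO_PAUSE_AN		0x1
#define MLO_PAUSE_RX		0x2
#define MLO_PAUSE_TX		0x4
#define MLO_PAUSE_TXRX_MASK	(MLO_PAUSE_RX | MLO_PAUSE_TX)

static bool pause_manual_changed(unsigned int old_state, unsigned int new_state)
{
	return ((old_state ^ new_state) & MLO_PAUSE_AN) ||
	       (!(new_state & MLO_PAUSE_AN) &&
		((old_state ^ new_state) & MLO_PAUSE_TXRX_MASK));
}

int main(void)
{
	/* autoneg off, tx pause flipped on -> needs a manual resolve */
	printf("%d\n", pause_manual_changed(0, MLO_PAUSE_TX));
	/* autoneg stays on, rx bit changes -> renegotiation covers it */
	printf("%d\n", pause_manual_changed(MLO_PAUSE_AN,
					    MLO_PAUSE_AN | MLO_PAUSE_RX));
	return 0;
}
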
index 93da7d3..74568ae 100644 (file)
@@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Wait max 640 ms to detect energy */
-               phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
-                                     rc & MII_LAN83C185_ENERGYON, 10000,
-                                     640000, true);
+               /* Wait max 640 ms to detect energy; the timeout itself is
+                * not an actual error.
+                */
+               read_poll_timeout(phy_read, rc,
+                                 rc & MII_LAN83C185_ENERGYON || rc < 0,
+                                 10000, 640000, true, phydev,
+                                 MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
                        return rc;
 
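Roughly, the read_poll_timeout() pattern used above keeps re-reading until the condition holds, a read error occurs, or the time budget runs out, and the timeout itself is not treated as a failure. A stand-alone sketch under those assumptions (read_status_stub() and the ENERGYON bit are invented here):

#include <stdio.h>
#include <unistd.h>

#define ENERGYON 0x10

static int read_status_stub(int iteration)
{
	return iteration >= 3 ? ENERGYON : 0;	/* energy shows up on try 3 */
}

static int wait_for_energy(unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	int i = 0, val;

	for (;;) {
		val = read_status_stub(i++);
		if (val < 0 || (val & ENERGYON) || waited >= timeout_us)
			break;
		usleep(sleep_us);
		waited += sleep_us;
	}
	return val;		/* the caller only fails on val < 0 */
}

int main(void)
{
	printf("status 0x%x\n", wait_for_energy(10000, 640000));
	return 0;
}
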
index 858b012..7adeb91 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/rtnetlink.h>
 #include <net/sock.h>
 #include <net/xdp.h>
+#include <net/ip_tunnels.h>
 #include <linux/seq_file.h>
 #include <linux/uio.h>
 #include <linux/skb_array.h>
@@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev)
        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                dev->netdev_ops = &tun_netdev_ops;
+               dev->header_ops = &ip_tunnel_header_ops;
 
                /* Point-to-Point TUN Device */
                dev->hard_header_len = 0;
index 9507114..a38e868 100644 (file)
@@ -1491,10 +1491,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                }
 
                if (pkt_cnt == 0) {
-                       /* Skip IP alignment psudo header */
-                       skb_pull(skb, 2);
                        skb->len = pkt_len;
-                       skb_set_tail_pointer(skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(skb, 2);
+                       skb_set_tail_pointer(skb, skb->len);
                        skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(skb, pkt_hdr);
                        return 1;
@@ -1503,8 +1503,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                ax_skb = skb_clone(skb, GFP_ATOMIC);
                if (ax_skb) {
                        ax_skb->len = pkt_len;
-                       ax_skb->data = skb->data + 2;
-                       skb_set_tail_pointer(ax_skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(ax_skb, 2);
+                       skb_set_tail_pointer(ax_skb, ax_skb->len);
                        ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(ax_skb, pkt_hdr);
                        usbnet_skb_return(dev, ax_skb);
index 31b1d4b..07c42c0 100644 (file)
@@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
        {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
index 355be77..bb4ccbd 100644 (file)
@@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Init all registers */
        ret = smsc95xx_reset(dev);
+       if (ret)
+               goto free_pdata;
 
        /* detect device revision as different features may be available */
        ret = smsc95xx_read_reg(dev, ID_REV, &val);
        if (ret < 0)
-               return ret;
+               goto free_pdata;
+
        val >>= 16;
        pdata->chip_id = val;
        pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
        return 0;
+
+free_pdata:
+       kfree(pdata);
+       return ret;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1324,7 +1331,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 
        if (pdata) {
-               cancel_delayed_work(&pdata->carrier_check);
+               cancel_delayed_work_sync(&pdata->carrier_check);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index e8085ab..89d85dc 100644 (file)
@@ -1380,6 +1380,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                        struct vxlan_rdst *rd;
 
                        if (rcu_access_pointer(f->nh)) {
+                               if (*idx < cb->args[2])
+                                       goto skip_nh;
                                err = vxlan_fdb_info(skb, vxlan, f,
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
@@ -1387,6 +1389,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     NLM_F_MULTI, NULL);
                                if (err < 0)
                                        goto out;
+skip_nh:
+                               *idx += 1;
                                continue;
                        }
 
index e30d91a..2848323 100644 (file)
@@ -303,7 +303,6 @@ static void lapbeth_setup(struct net_device *dev)
        dev->netdev_ops      = &lapbeth_netdev_ops;
        dev->needs_free_netdev = true;
        dev->type            = ARPHRD_X25;
-       dev->hard_header_len = 3;
        dev->mtu             = 1000;
        dev->addr_len        = 0;
 }
@@ -324,6 +323,14 @@ static int lapbeth_new_device(struct net_device *dev)
        if (!ndev)
                goto out;
 
+       /* When transmitting data:
+        * first this driver removes a pseudo header of 1 byte,
+        * then the lapb module prepends an LAPB header of at most 3 bytes,
+        * then this driver prepends a length field of 2 bytes,
+        * then the underlying Ethernet device prepends its own header.
+        */
+       ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+
        lapbeth = netdev_priv(ndev);
        lapbeth->axdev = ndev;
 
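The comment above spells out the headroom arithmetic; a tiny stand-alone check of that sum (14 bytes is just an example value for the lower Ethernet device's header) looks like:

#include <stdio.h>

int main(void)
{
	int ether_header_len = 14;	/* example lower-device header */

	/* -1 byte pseudo header, +3 LAPB, +2 length field, + Ethernet */
	int headroom = -1 + 3 + 2 + ether_header_len;

	printf("reserved headroom: %d bytes\n", headroom);	/* 18 */
	return 0;
}
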
index 3ac3f85..c9f65e9 100644 (file)
@@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev)
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
 
+       mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
-               return ret;
-       mutex_lock(&wg->device_update_lock);
+               goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
+out:
        mutex_unlock(&wg->device_update_lock);
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev)
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
+       rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
@@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev)
        skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
        free_percpu(wg->incoming_handshakes_worker);
-       if (wg->have_creating_net_ref)
-               put_net(wg->creating_net);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
 
-       pr_debug("%s: Interface deleted\n", dev->name);
+       pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
 }
 
@@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev)
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
 
        dev->netdev_ops = &netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
@@ -292,7 +293,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;
 
-       wg->creating_net = src_net;
+       rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
@@ -393,30 +394,26 @@ static struct rtnl_link_ops link_ops __read_mostly = {
        .newlink                = wg_newlink,
 };
 
-static int wg_netdevice_notification(struct notifier_block *nb,
-                                    unsigned long action, void *data)
+static void wg_netns_pre_exit(struct net *net)
 {
-       struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
-       struct wg_device *wg = netdev_priv(dev);
-
-       ASSERT_RTNL();
-
-       if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
-               return 0;
+       struct wg_device *wg;
 
-       if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
-               put_net(wg->creating_net);
-               wg->have_creating_net_ref = false;
-       } else if (dev_net(dev) != wg->creating_net &&
-                  !wg->have_creating_net_ref) {
-               wg->have_creating_net_ref = true;
-               get_net(wg->creating_net);
+       rtnl_lock();
+       list_for_each_entry(wg, &device_list, device_list) {
+               if (rcu_access_pointer(wg->creating_net) == net) {
+                       pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
+                       netif_carrier_off(wg->dev);
+                       mutex_lock(&wg->device_update_lock);
+                       rcu_assign_pointer(wg->creating_net, NULL);
+                       wg_socket_reinit(wg, NULL, NULL);
+                       mutex_unlock(&wg->device_update_lock);
+               }
        }
-       return 0;
+       rtnl_unlock();
 }
 
-static struct notifier_block netdevice_notifier = {
-       .notifier_call = wg_netdevice_notification
+static struct pernet_operations pernet_ops = {
+       .pre_exit = wg_netns_pre_exit
 };
 
 int __init wg_device_init(void)
@@ -429,18 +426,18 @@ int __init wg_device_init(void)
                return ret;
 #endif
 
-       ret = register_netdevice_notifier(&netdevice_notifier);
+       ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_pm;
 
        ret = rtnl_link_register(&link_ops);
        if (ret)
-               goto error_netdevice;
+               goto error_pernet;
 
        return 0;
 
-error_netdevice:
-       unregister_netdevice_notifier(&netdevice_notifier);
+error_pernet:
+       unregister_pernet_device(&pernet_ops);
 error_pm:
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
@@ -451,7 +448,7 @@ error_pm:
 void wg_device_uninit(void)
 {
        rtnl_link_unregister(&link_ops);
-       unregister_netdevice_notifier(&netdevice_notifier);
+       unregister_pernet_device(&pernet_ops);
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
 #endif
index b15a8be..4d0144e 100644 (file)
@@ -40,7 +40,7 @@ struct wg_device {
        struct net_device *dev;
        struct crypt_queue encrypt_queue, decrypt_queue;
        struct sock __rcu *sock4, *sock6;
-       struct net *creating_net;
+       struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
        struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
        struct workqueue_struct *packet_crypt_wq;
@@ -56,7 +56,6 @@ struct wg_device {
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
-       bool have_creating_net_ref;
 };
 
 int wg_device_init(void);
index 802099c..20a4f3c 100644 (file)
@@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
        if (flags & ~__WGDEVICE_F_ALL)
                goto out;
 
-       ret = -EPERM;
-       if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
-            info->attrs[WGDEVICE_A_FWMARK]) &&
-           !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
-               goto out;
+       if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
+               struct net *net;
+               rcu_read_lock();
+               net = rcu_dereference(wg->creating_net);
+               ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
+               rcu_read_unlock();
+               if (ret)
+                       goto out;
+       }
 
        ++wg->device_update_gen;
 
index 6264336..201a226 100644 (file)
@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
        memcpy(handshake->hash, hash, NOISE_HASH_LEN);
        memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
        handshake->remote_index = src->sender_index;
-       if ((s64)(handshake->last_initiation_consumption -
-           (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+       initiation_consumption = ktime_get_coarse_boottime_ns();
+       if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
                handshake->last_initiation_consumption = initiation_consumption;
        handshake->state = HANDSHAKE_CONSUMED_INITIATION;
        up_write(&handshake->lock);
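
The reworked lines above read the coarse boottime clock once into a local and then only advance last_initiation_consumption when the signed difference says the stored value is older. A user-space sketch of that wrap-safe update (not the WireGuard code):

#include <stdint.h>
#include <stdio.h>

static uint64_t last_consumption;

static void note_consumption(uint64_t now_ns)
{
	/* signed subtraction keeps the comparison valid across wraparound */
	if ((int64_t)(last_consumption - now_ns) < 0)
		last_consumption = now_ns;
}

int main(void)
{
	note_consumption(1000);
	note_consumption(500);		/* older timestamp is ignored */
	printf("last = %llu\n", (unsigned long long)last_consumption);
	return 0;
}
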
index c58df43..dfb674e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip_tunnels.h>
 
 struct wg_device;
 struct wg_peer;
@@ -65,25 +66,9 @@ struct packet_cb {
 #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
 
-/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
-{
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct iphdr)) <=
-                   skb_tail_pointer(skb) &&
-           ip_hdr(skb)->version == 4)
-               return htons(ETH_P_IP);
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
-                   skb_tail_pointer(skb) &&
-           ipv6_hdr(skb)->version == 6)
-               return htons(ETH_P_IPV6);
-       return 0;
-}
-
 static inline bool wg_check_packet_protocol(struct sk_buff *skb)
 {
-       __be16 real_protocol = wg_examine_packet_protocol(skb);
+       __be16 real_protocol = ip_tunnel_parse_protocol(skb);
        return real_protocol && skb->protocol == real_protocol;
 }
 
index 9143814..2c9551e 100644 (file)
@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = ~0; /* All levels */
-       skb->protocol = wg_examine_packet_protocol(skb);
+       skb->protocol = ip_tunnel_parse_protocol(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
        if (unlikely(routed_peer != peer))
                goto dishonest_packet_peer;
 
-       if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
-               ++dev->stats.rx_dropped;
-               net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
-                                   dev->name, peer->internal_id,
-                                   &peer->endpoint.addr);
-       } else {
-               update_rx_stats(peer, message_data_len(len_before_trim));
-       }
+       napi_gro_receive(&peer->napi, skb);
+       update_rx_stats(peer, message_data_len(len_before_trim));
        return;
 
 dishonest_packet_peer:
index f901802..c33e2c8 100644 (file)
@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock)
 
 int wg_socket_init(struct wg_device *wg, u16 port)
 {
+       struct net *net;
        int ret;
        struct udp_tunnel_sock_cfg cfg = {
                .sk_user_data = wg,
@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port)
        };
 #endif
 
+       rcu_read_lock();
+       net = rcu_dereference(wg->creating_net);
+       net = net ? maybe_get_net(net) : NULL;
+       rcu_read_unlock();
+       if (unlikely(!net))
+               return -ENONET;
+
 #if IS_ENABLED(CONFIG_IPV6)
 retry:
 #endif
 
-       ret = udp_sock_create(wg->creating_net, &port4, &new4);
+       ret = udp_sock_create(net, &port4, &new4);
        if (ret < 0) {
                pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
-               return ret;
+               goto out;
        }
        set_sock_opts(new4);
-       setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+       setup_udp_tunnel_sock(net, new4, &cfg);
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6_mod_enabled()) {
                port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
-               ret = udp_sock_create(wg->creating_net, &port6, &new6);
+               ret = udp_sock_create(net, &port6, &new6);
                if (ret < 0) {
                        udp_tunnel_sock_release(new4);
                        if (ret == -EADDRINUSE && !port && retries++ < 100)
                                goto retry;
                        pr_err("%s: Could not create IPv6 socket\n",
                               wg->dev->name);
-                       return ret;
+                       goto out;
                }
                set_sock_opts(new6);
-               setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+               setup_udp_tunnel_sock(net, new6, &cfg);
        }
 #endif
 
        wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
-       return 0;
+       ret = 0;
+out:
+       put_net(net);
+       return ret;
 }
 
 void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
index bc8c15f..080e5aa 100644 (file)
@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
 void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                  struct wil_net_stats *stats, bool gro)
 {
-       gro_result_t rc = GRO_NORMAL;
        struct wil6210_vif *vif = ndev_to_vif(ndev);
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct wireless_dev *wdev = vif_to_wdev(vif);
@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
         */
        int mcast = is_multicast_ether_addr(da);
        struct sk_buff *xmit_skb = NULL;
-       static const char * const gro_res_str[] = {
-               [GRO_MERGED]            = "GRO_MERGED",
-               [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
-               [GRO_HELD]              = "GRO_HELD",
-               [GRO_NORMAL]            = "GRO_NORMAL",
-               [GRO_DROP]              = "GRO_DROP",
-               [GRO_CONSUMED]          = "GRO_CONSUMED",
-       };
 
        if (wdev->iftype == NL80211_IFTYPE_STATION) {
                sa = wil_skb_get_sa(skb);
                if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
                        /* mcast packet looped back to us */
-                       rc = GRO_DROP;
                        dev_kfree_skb(skb);
-                       goto stats;
+                       ndev->stats.rx_dropped++;
+                       stats->rx_dropped++;
+                       wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+                       return;
                }
        } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
                if (mcast) {
@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                        wil_rx_handle_eapol(vif, skb);
 
                if (gro)
-                       rc = napi_gro_receive(&wil->napi_rx, skb);
+                       napi_gro_receive(&wil->napi_rx, skb);
                else
                        netif_rx_ni(skb);
-               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
-                            len, gro_res_str[rc]);
-       }
-stats:
-       /* statistics. rc set to GRO_NORMAL for AP bridging */
-       if (unlikely(rc == GRO_DROP)) {
-               ndev->stats.rx_dropped++;
-               stats->rx_dropped++;
-               wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
-       } else {
-               ndev->stats.rx_packets++;
-               stats->rx_packets++;
-               ndev->stats.rx_bytes += len;
-               stats->rx_bytes += len;
-               if (mcast)
-                       ndev->stats.multicast++;
        }
+       ndev->stats.rx_packets++;
+       stats->rx_packets++;
+       ndev->stats.rx_bytes += len;
+       stats->rx_bytes += len;
+       if (mcast)
+               ndev->stats.multicast++;
 }
 
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
index 89b8597..4cef69b 100644 (file)
@@ -95,7 +95,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        struct encrypted_key_payload *epayload;
        struct device *dev = &nvdimm->dev;
 
-       keyref = lookup_user_key(id, 0, 0);
+       keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
        if (IS_ERR(keyref))
                return NULL;
 
index c2c5bc4..add0401 100644 (file)
@@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
                dev_warn(ctrl->device,
                        "Identify Descriptors failed (%d)\n", status);
                 /*
-                 * Don't treat an error as fatal, as we potentially already
-                 * have a NGUID or EUI-64.
+                 * Don't treat non-retryable errors as fatal, as we potentially
+                 * already have a NGUID or EUI-64.  If we failed with DNR set,
+                 * we want to silently ignore the error as we can still
+                 * identify the device, but if the status does not have DNR
+                 * set, we want to propagate the error back specifically for
+                 * the disk revalidation flow, to make sure we don't abandon
+                 * the device just because of a temporary retryable error
+                 * (such as a path or transport error).
                  */
-               if (status > 0 && !(status & NVME_SC_DNR))
+               if (status > 0 && (status & NVME_SC_DNR))
                        status = 0;
                goto free_data;
        }
@@ -1974,7 +1980,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
-               revalidate_disk(ns->head->disk);
+               nvme_mpath_update_disk_size(ns->head->disk);
        }
 #endif
        return 0;
@@ -4174,6 +4180,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        ctrl->dev = dev;
        ctrl->ops = ops;
        ctrl->quirks = quirks;
+       ctrl->numa_node = NUMA_NO_NODE;
        INIT_WORK(&ctrl->scan_work, nvme_scan_work);
        INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
        INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
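
For the Identify Descriptors hunk above, the new check swallows a status with the do-not-retry bit set (the device can still be identified without the descriptors) and propagates everything else so the revalidation path can retry later. A user-space illustration, assuming the spec's bit-14 DNR value and made-up status codes:

#include <stdio.h>

#define NVME_SC_DNR 0x4000

static int filter_identify_status(int status)
{
	if (status > 0 && (status & NVME_SC_DNR))
		return 0;	/* permanent failure: ignore, keep the device */
	return status;		/* transient failure: report it back */
}

int main(void)
{
	printf("%d\n", filter_identify_status(0x4002));	/* DNR set -> 0 */
	printf("%d\n", filter_identify_status(0x0002));	/* retryable -> kept */
	return 0;
}
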
index da78e49..6650947 100644 (file)
@@ -409,15 +409,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 {
        struct nvme_ns_head *head = ns->head;
 
-       lockdep_assert_held(&ns->head->lock);
-
        if (!head->disk)
                return;
 
-       if (!(head->disk->flags & GENHD_FL_UP))
+       if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
                device_add_disk(&head->subsys->dev, head->disk,
                                nvme_ns_id_attr_groups);
 
+       mutex_lock(&head->lock);
        if (nvme_path_is_optimized(ns)) {
                int node, srcu_idx;
 
@@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
                        __nvme_find_path(head, node);
                srcu_read_unlock(&head->srcu, srcu_idx);
        }
+       mutex_unlock(&head->lock);
 
-       synchronize_srcu(&ns->head->srcu);
-       kblockd_schedule_work(&ns->head->requeue_work);
+       synchronize_srcu(&head->srcu);
+       kblockd_schedule_work(&head->requeue_work);
 }
 
 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
@@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       mutex_lock(&ns->head->lock);
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
        if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
-       mutex_unlock(&ns->head->lock);
 }
 
 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -640,38 +638,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 }
 DEVICE_ATTR_RO(ana_state);
 
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
                struct nvme_ana_group_desc *desc, void *data)
 {
-       struct nvme_ns *ns = data;
+       struct nvme_ana_group_desc *dst = data;
 
-       if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
-               nvme_update_ns_ana_state(desc, ns);
-               return -ENXIO; /* just break out of the loop */
-       }
+       if (desc->grpid != dst->grpid)
+               return 0;
 
-       return 0;
+       *dst = *desc;
+       return -ENXIO; /* just break out of the loop */
 }
 
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        if (nvme_ctrl_use_ana(ns->ctrl)) {
+               struct nvme_ana_group_desc desc = {
+                       .grpid = id->anagrpid,
+                       .state = 0,
+               };
+
                mutex_lock(&ns->ctrl->ana_lock);
                ns->ana_grpid = le32_to_cpu(id->anagrpid);
-               nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+               nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
                mutex_unlock(&ns->ctrl->ana_lock);
+               if (desc.state) {
+                       /* found the group desc: update */
+                       nvme_update_ns_ana_state(&desc, ns);
+               }
        } else {
-               mutex_lock(&ns->head->lock);
                ns->ana_state = NVME_ANA_OPTIMIZED; 
                nvme_mpath_set_live(ns);
-               mutex_unlock(&ns->head->lock);
        }
 
        if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
-               struct backing_dev_info *info =
-                                       ns->head->disk->queue->backing_dev_info;
+               struct gendisk *disk = ns->head->disk;
 
-               info->capabilities |= BDI_CAP_STABLE_WRITES;
+               if (disk)
+                       disk->queue->backing_dev_info->capabilities |=
+                                       BDI_CAP_STABLE_WRITES;
        }
 }
 
@@ -686,6 +691,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
        blk_cleanup_queue(head->disk->queue);
+       if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+               /*
+                * if device_add_disk wasn't called, prevent the
+                * disk release from putting a bogus reference on
+                * the request queue
+                */
+               head->disk->queue = NULL;
+       }
        put_disk(head->disk);
 }
 
index c0f4226..1de3f9b 100644 (file)
@@ -364,6 +364,8 @@ struct nvme_ns_head {
        spinlock_t              requeue_lock;
        struct work_struct      requeue_work;
        struct mutex            lock;
+       unsigned long           flags;
+#define NVME_NSHEAD_DISK_LIVE  0
        struct nvme_ns __rcu    *current_path[];
 #endif
 };
@@ -602,6 +604,16 @@ static inline void nvme_trace_bio_complete(struct request *req,
                trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+       struct block_device *bdev = bdget_disk(disk, 0);
+
+       if (bdev) {
+               bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
+               bdput(bdev);
+       }
+}
+
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
@@ -677,6 +689,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
 static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 {
 }
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
index e2bacd3..b1d18f0 100644 (file)
@@ -1593,7 +1593,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 
                dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
-               dev->admin_tagset.numa_node = dev_to_node(dev->dev);
+               dev->admin_tagset.numa_node = dev->ctrl.numa_node;
                dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
                dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
                dev->admin_tagset.driver_data = dev;
@@ -1669,6 +1669,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                return result;
 
+       dev->ctrl.numa_node = dev_to_node(dev->dev);
+
        nvmeq = &dev->queues[0];
        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;
@@ -2257,7 +2259,7 @@ static void nvme_dev_add(struct nvme_dev *dev)
                if (dev->io_queues[HCTX_TYPE_POLL])
                        dev->tagset.nr_maps++;
                dev->tagset.timeout = NVME_IO_TIMEOUT;
-               dev->tagset.numa_node = dev_to_node(dev->dev);
+               dev->tagset.numa_node = dev->ctrl.numa_node;
                dev->tagset.queue_depth =
                                min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
                dev->tagset.cmd_size = sizeof(struct nvme_iod);
index f8f856d..13506a8 100644 (file)
@@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
         * Spread I/O queues completion vectors according their queue index.
         * Admin queues can always go on completion vector 0.
         */
-       comp_vector = idx == 0 ? idx : idx - 1;
+       comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
 
        /* Polling queues need direct cq polling context */
        if (nvme_rdma_poll_queue(queue))
index 3345ec7..79ef2b8 100644 (file)
@@ -1532,7 +1532,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                set->ops = &nvme_tcp_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                set->reserved_tags = 2; /* connect + keep-alive */
-               set->numa_node = NUMA_NO_NODE;
+               set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
@@ -1544,7 +1544,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                set->ops = &nvme_tcp_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
-               set->numa_node = NUMA_NO_NODE;
+               set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
index 0d54e73..6344e73 100644 (file)
@@ -340,7 +340,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
-       ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+       ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
@@ -512,7 +512,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
-       ctrl->tag_set.numa_node = NUMA_NO_NODE;
+       ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
index a04afe7..ef6f818 100644 (file)
@@ -314,10 +314,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                 child, addr);
 
                        if (of_mdiobus_child_is_phy(child)) {
+                               /* -ENODEV is the return code that PHYLIB has
+                                * standardized on to indicate that bus
+                                * scanning should continue.
+                                */
                                rc = of_mdiobus_register_phy(mdio, child, addr);
-                               if (rc && rc != -ENODEV)
+                               if (!rc)
+                                       break;
+                               if (rc != -ENODEV)
                                        goto unregister;
-                               break;
                        }
                }
        }
index 9a58735..314f306 100644 (file)
@@ -902,6 +902,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
                return -EINVAL;
        }
 
+       mutex_lock(&opp_table->lock);
+       opp_table->parsed_static_opps = 1;
+       mutex_unlock(&opp_table->lock);
+
        val = prop->value;
        while (nr) {
                unsigned long freq = be32_to_cpup(val++) * 1000;
index e386d4e..9a64cf9 100644 (file)
@@ -546,9 +546,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 
        vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
                                                    x86_vector_domain);
-       irq_domain_free_fwnode(fn);
-       if (!vmd->irq_domain)
+       if (!vmd->irq_domain) {
+               irq_domain_free_fwnode(fn);
                return -ENODEV;
+       }
 
        pci_add_resource(&resources, &vmd->resources[0]);
        pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
index 1b8e337..87c4be9 100644 (file)
@@ -1718,6 +1718,7 @@ static struct platform_driver cci_pmu_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .of_match_table = arm_cci_pmu_matches,
+                  .suppress_bind_attrs = true,
                  },
        .probe = cci_pmu_probe,
        .remove = cci_pmu_remove,
index d50edef..7b7d23f 100644 (file)
@@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = {
        .driver = {
                .name = "arm-ccn",
                .of_match_table = arm_ccn_match,
+               .suppress_bind_attrs = true,
        },
        .probe = arm_ccn_probe,
        .remove = arm_ccn_remove,
index 518d060..96ed93c 100644 (file)
@@ -757,6 +757,7 @@ static struct platform_driver dsu_pmu_driver = {
        .driver = {
                .name   = DRVNAME,
                .of_match_table = of_match_ptr(dsu_pmu_of_match),
+               .suppress_bind_attrs = true,
        },
        .probe = dsu_pmu_device_probe,
        .remove = dsu_pmu_device_remove,
index 48e28ef..4cdb35d 100644 (file)
@@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, smmu_pmu);
 
        smmu_pmu->pmu = (struct pmu) {
+               .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .pmu_enable     = smmu_pmu_enable,
                .pmu_disable    = smmu_pmu_disable,
@@ -859,6 +860,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
 static struct platform_driver smmu_pmu_driver = {
        .driver = {
                .name = "arm-smmu-v3-pmcg",
+               .suppress_bind_attrs = true,
        },
        .probe = smmu_pmu_probe,
        .remove = smmu_pmu_remove,
index d80f487..e51ddb6 100644 (file)
@@ -1226,6 +1226,7 @@ static struct platform_driver arm_spe_pmu_driver = {
        .driver = {
                .name           = DRVNAME,
                .of_match_table = of_match_ptr(arm_spe_pmu_of_match),
+               .suppress_bind_attrs = true,
        },
        .probe  = arm_spe_pmu_device_probe,
        .remove = arm_spe_pmu_device_remove,
index 90884d1..397540a 100644 (file)
@@ -512,6 +512,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
 {
        *pmu = (struct ddr_pmu) {
                .pmu = (struct pmu) {
+                       .module       = THIS_MODULE,
                        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
                        .task_ctx_nr = perf_invalid_context,
                        .attr_groups = attr_groups,
@@ -706,6 +707,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
        .driver         = {
                .name   = "imx-ddr-pmu",
                .of_match_table = imx_ddr_pmu_dt_ids,
+               .suppress_bind_attrs = true,
        },
        .probe          = ddr_perf_probe,
        .remove         = ddr_perf_remove,
index 15713fa..5e3645c 100644 (file)
@@ -378,6 +378,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                              ddrc_pmu->sccl_id, ddrc_pmu->index_id);
        ddrc_pmu->pmu = (struct pmu) {
                .name           = name,
+               .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = hisi_uncore_pmu_event_init,
                .pmu_enable     = hisi_uncore_pmu_enable,
@@ -418,6 +419,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
        .driver = {
                .name = "hisi_ddrc_pmu",
                .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = hisi_ddrc_pmu_probe,
        .remove = hisi_ddrc_pmu_remove,
index dcc5600..5eb8168 100644 (file)
@@ -390,6 +390,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
                              hha_pmu->sccl_id, hha_pmu->index_id);
        hha_pmu->pmu = (struct pmu) {
                .name           = name,
+               .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = hisi_uncore_pmu_event_init,
                .pmu_enable     = hisi_uncore_pmu_enable,
@@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
        .driver = {
                .name = "hisi_hha_pmu",
                .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = hisi_hha_pmu_probe,
        .remove = hisi_hha_pmu_remove,
index 7719ae4..3e8b5ea 100644 (file)
@@ -380,6 +380,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
                              l3c_pmu->sccl_id, l3c_pmu->index_id);
        l3c_pmu->pmu = (struct pmu) {
                .name           = name,
+               .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = hisi_uncore_pmu_event_init,
                .pmu_enable     = hisi_uncore_pmu_enable,
@@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
        .driver = {
                .name = "hisi_l3c_pmu",
                .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = hisi_l3c_pmu_probe,
        .remove = hisi_l3c_pmu_remove,
index 21d6991..4da37f6 100644 (file)
@@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = {
        .driver = {
                .name = "qcom-l2cache-pmu",
                .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = l2_cache_pmu_probe,
        .remove = l2_cache_pmu_remove,
index 656e830..9ddb577 100644 (file)
@@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = {
        .driver = {
                .name = "qcom-l3cache-pmu",
                .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = qcom_l3_cache_pmu_probe,
 };
index 51b31d6..aac9823 100644 (file)
@@ -1017,6 +1017,7 @@ static struct platform_driver tx2_uncore_driver = {
        .driver = {
                .name           = "tx2-uncore-pmu",
                .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe = tx2_uncore_probe,
        .remove = tx2_uncore_remove,
index 46ee680..edac28c 100644 (file)
@@ -1975,6 +1975,7 @@ static struct platform_driver xgene_pmu_driver = {
                .name           = "xgene-pmu",
                .of_match_table = xgene_pmu_of_match,
                .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+               .suppress_bind_attrs = true,
        },
 };
 
index 8569273..e5842e4 100644 (file)
@@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
        struct sun4i_usb_phy_data *data =
                container_of(work, struct sun4i_usb_phy_data, detect.work);
        struct phy *phy0 = data->phys[0].phy;
-       struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
+       struct sun4i_usb_phy *phy;
        bool force_session_end, id_notify = false, vbus_notify = false;
        int id_det, vbus_det;
 
-       if (phy0 == NULL)
+       if (!phy0)
                return;
 
+       phy = phy_get_drvdata(phy0);
        id_det = sun4i_usb_phy0_get_id_det(data);
        vbus_det = sun4i_usb_phy0_get_vbus_det(data);
 
index c2a35be..360b1eb 100644 (file)
@@ -134,7 +134,7 @@ static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
 
        reg_val = readl(base + reg);
        reg_val &= ~mask;
-       reg_val |= FIELD_PREP(mask, val);
+       reg_val |= val;
        writel(reg_val, base + reg);
 }
 
@@ -169,7 +169,7 @@ static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
                return 0;
 
        combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
-                              PCIE_PHY_CLK_PAD, 0);
+                              PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
 
        /* Delay for stable clock PLL */
        usleep_range(50, 100);
@@ -192,14 +192,14 @@ static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
                return 0;
 
        combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
-                              PCIE_PHY_CLK_PAD, 1);
+                              PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
 
        return 0;
 }
 
 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
 {
-       enum intel_combo_mode cb_mode = PHY_PCIE_MODE;
+       enum intel_combo_mode cb_mode;
        enum aggregated_mode aggr = cbphy->aggr_mode;
        struct device *dev = cbphy->dev;
        enum intel_phy_mode mode;
@@ -224,6 +224,8 @@ static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
 
                cb_mode = SATA0_SATA1_MODE;
                break;
+       default:
+               return -EINVAL;
        }
 
        ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
@@ -385,7 +387,7 @@ static int intel_cbphy_calibrate(struct phy *phy)
 
        /* trigger auto RX adaptation */
        combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
-                              ADAPT_REQ_MSK, 3);
+                              ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
        /* Wait RX adaptation to finish */
        ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
                                 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
@@ -396,7 +398,7 @@ static int intel_cbphy_calibrate(struct phy *phy)
 
        /* Stop RX adaptation */
        combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
-                              ADAPT_REQ_MSK, 0);
+                              ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
 
        return ret;
 }
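
The combo-phy changes above make combo_phy_w32_off_mask() expect a value that is already shifted into the masked field, which is why every caller now wraps it in FIELD_PREP(). A simplified user-space sketch of that read-modify-write (field_prep() below is a GCC-style stand-in for the kernel macro, and the mask value is only an example):

#include <stdint.h>
#include <stdio.h>

#define ADAPT_REQ_MSK	0x30	/* example two-bit field at bits 5:4 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t shifted_val)
{
	reg &= ~mask;		/* clear the field */
	return reg | shifted_val;	/* OR in the pre-shifted value */
}

int main(void)
{
	uint32_t reg = 0x1f;	/* field 5:4 currently holds 1 */

	reg = rmw_field(reg, ADAPT_REQ_MSK, field_prep(ADAPT_REQ_MSK, 3));
	printf("reg = 0x%x\n", reg);	/* field 5:4 now holds 3 -> 0x3f */
	return 0;
}
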
index a7c6c94..8af8c6c 100644 (file)
@@ -607,8 +607,8 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, inno);
 
        inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
-       if (!inno->phy_base)
-               return -ENOMEM;
+       if (IS_ERR(inno->phy_base))
+               return PTR_ERR(inno->phy_base);
 
        inno->ref_clk = devm_clk_get(dev, "ref");
        if (IS_ERR(inno->ref_clk)) {
index 0a166d5..a174b3c 100644 (file)
@@ -72,7 +72,7 @@ struct serdes_am654_clk_mux {
 #define to_serdes_am654_clk_mux(_hw)   \
                container_of(_hw, struct serdes_am654_clk_mux, hw)
 
-static struct regmap_config serdes_am654_regmap_config = {
+static const struct regmap_config serdes_am654_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
index 30ea5b2..33c4cf0 100644 (file)
@@ -117,7 +117,7 @@ struct wiz_clk_mux {
 struct wiz_clk_divider {
        struct clk_hw           hw;
        struct regmap_field     *field;
-       struct clk_div_table    *table;
+       const struct clk_div_table      *table;
        struct clk_init_data    clk_data;
 };
 
@@ -131,7 +131,7 @@ struct wiz_clk_mux_sel {
 
 struct wiz_clk_div_sel {
        struct regmap_field     *field;
-       struct clk_div_table    *table;
+       const struct clk_div_table      *table;
        const char              *node_name;
 };
 
@@ -173,7 +173,7 @@ static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
        },
 };
 
-static struct clk_div_table clk_div_table[] = {
+static const struct clk_div_table clk_div_table[] = {
        { .val = 0, .div = 1, },
        { .val = 1, .div = 2, },
        { .val = 2, .div = 4, },
@@ -559,7 +559,7 @@ static const struct clk_ops wiz_clk_div_ops = {
 
 static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
                                struct regmap_field *field,
-                               struct clk_div_table *table)
+                               const struct clk_div_table *table)
 {
        struct device *dev = wiz->dev;
        struct wiz_clk_divider *div;
@@ -756,7 +756,7 @@ static const struct reset_control_ops wiz_phy_reset_ops = {
        .deassert = wiz_phy_reset_deassert,
 };
 
-static struct regmap_config wiz_regmap_config = {
+static const struct regmap_config wiz_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
index 0ff7c55..615174a 100644 (file)
@@ -800,6 +800,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
        pm_runtime_put(vg->dev);
 }
 
+static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
+                                     unsigned int offset)
+{
+       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+       /*
+        * Before making any direction modifications, do a check if gpio is set
+        * for direct IRQ. On Bay Trail, setting GPIO to output does not make
+        * sense, so let's at least inform the caller before they shoot
+        * themselves in the foot.
+        */
+       if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
 static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
                                  struct pinctrl_gpio_range *range,
                                  unsigned int offset,
@@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 {
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        unsigned long flags;
        u32 value;
 
@@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
        value &= ~BYT_DIR_MASK;
        if (input)
                value |= BYT_OUTPUT_EN;
-       else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
-               /*
-                * Before making any direction modifications, do a check if gpio
-                * is set for direct IRQ.  On baytrail, setting GPIO to output
-                * does not make sense, so let's at least inform the caller before
-                * they shoot themselves in the foot.
-                */
-               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+       else
+               byt_gpio_direct_irq_check(vg, offset);
 
        writel(value, val_reg);
 
@@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
+
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       reg |= BYT_OUTPUT_EN;
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
 }
 
+/*
+ * Note: despite the temptation, this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set(); that does not work.
+ * This MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
 static int byt_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned int offset, int value)
 {
-       int ret = pinctrl_gpio_direction_output(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
 
-       if (ret)
-               return ret;
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       byt_gpio_direct_irq_check(vg, offset);
 
-       byt_gpio_set(chip, offset, value);
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       if (value)
+               reg |= BYT_LEVEL;
+       else
+               reg &= ~BYT_LEVEL;
 
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
index 3e5760f..d4a192d 100644 (file)
@@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = {
        {
                .name = "uart0",
                .pins = uart0_pins,
-               .npins = 9,
+               .npins = 5,
        },
        {
                .name = "uart1",
index 877aade..8f4acdc 100644 (file)
@@ -441,6 +441,7 @@ static int asus_wmi_battery_add(struct power_supply *battery)
         * battery is named BATT.
         */
        if (strcmp(battery->desc->name, "BAT0") != 0 &&
+           strcmp(battery->desc->name, "BAT1") != 0 &&
            strcmp(battery->desc->name, "BATT") != 0)
                return -ENODEV;
 
index 1409a5b..4f6f7f0 100644 (file)
@@ -13,6 +13,9 @@
 #define INTEL_RAPL_PRIO_DEVID_0        0x3451
 #define INTEL_CFG_MBOX_DEVID_0 0x3459
 
+#define INTEL_RAPL_PRIO_DEVID_1 0x3251
+#define INTEL_CFG_MBOX_DEVID_1  0x3259
+
 /*
  * Validate maximum commands in a single request.
  * This is enough to handle command to every core in one ioctl, or all
index d84e217..95f01e7 100644 (file)
@@ -147,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
 
 static const struct pci_device_id isst_if_mbox_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)},
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
index 3584859..aa17fd7 100644 (file)
@@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
 
 static const struct pci_device_id isst_if_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)},
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, isst_if_ids);
index ff7f0a4..0f6fced 100644 (file)
@@ -885,11 +885,19 @@ static ssize_t dispatch_proc_write(struct file *file,
 
        if (!ibm || !ibm->write)
                return -EINVAL;
+       if (count > PAGE_SIZE - 1)
+               return -EINVAL;
+
+       kernbuf = kmalloc(count + 1, GFP_KERNEL);
+       if (!kernbuf)
+               return -ENOMEM;
 
-       kernbuf = strndup_user(userbuf, PAGE_SIZE);
-       if (IS_ERR(kernbuf))
-               return PTR_ERR(kernbuf);
+       if (copy_from_user(kernbuf, userbuf, count)) {
+               kfree(kernbuf);
+               return -EFAULT;
+       }
 
+       kernbuf[count] = 0;
        ret = ibm->write(kernbuf);
        if (ret == 0)
                ret = count;
index 8f677f5..edb1c4f 100644 (file)
@@ -684,7 +684,7 @@ config REGULATOR_MT6323
 
 config REGULATOR_MT6358
        tristate "MediaTek MT6358 PMIC"
-       depends on MFD_MT6397 && BROKEN
+       depends on MFD_MT6397
        help
          Say y here to select this option to enable the power regulator of
          MediaTek MT6358 PMIC.
index e8f1633..0796e4a 100644 (file)
@@ -31,7 +31,7 @@ obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
 obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
 obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
 obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
-obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o
 obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
 obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
 obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o
index e1d6c8f..fe65b5a 100644 (file)
@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
        },
        {
                DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
-               .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
        },
        {
                DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
index e970e9d..e4bb09b 100644 (file)
@@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
                        continue;
                }
 
-               ret = selector + sel;
+               ret = selector + sel - range->min_sel;
 
                voltage = rdev->desc->ops->list_voltage(rdev, ret);
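
In the hunk above, selector counts the selectors already consumed by earlier pickable ranges, while sel appears to be an absolute selector inside the current range, so the range's own min_sel has to come off before the two are added. A standalone worked example with made-up range boundaries:

  #include <stdio.h>

  struct range { int min_sel, max_sel; };

  int main(void)
  {
          /* hypothetical: range 0 covers selectors 0..3, range 1 covers 4..7 */
          struct range r[] = { { 0, 3 }, { 4, 7 } };
          int selector = r[0].max_sel - r[0].min_sel + 1; /* 4 selectors before range 1 */
          int sel = 5;                                    /* absolute selector in range 1 */

          printf("without offset: %d\n", selector + sel);                /* 9, past the end */
          printf("with offset:    %d\n", selector + sel - r[1].min_sel); /* 5 */
          return 0;
  }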
 
index 6895379..4c8e8b4 100644 (file)
@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
 
 };
 
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
 #define PFUZE100_FIXED_REG(_chip, _name, base, voltage)        \
        [_chip ## _ ## _name] = {       \
                .desc = {       \
@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
        .stby_mask = 0x20,      \
 }
 
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step)  {       \
-       .desc = {       \
-               .name = #_name,\
-               .n_voltages = ((max) - (min)) / (step) + 1,     \
-               .ops = &pfuze100_sw_regulator_ops,      \
-               .type = REGULATOR_VOLTAGE,      \
-               .id = _chip ## _ ## _name,      \
-               .owner = THIS_MODULE,   \
-               .min_uV = (min),        \
-               .uV_step = (step),      \
-               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
-               .vsel_mask = 0x7,       \
-       },      \
-       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
-       .stby_mask = 0x7,       \
-}
+/* No linear case for some switches of PFUZE3000 */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages)   \
+       [_chip ## _ ##  _name] = {      \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = ARRAY_SIZE(voltages),     \
+                       .ops = &pfuze3000_sw_regulator_ops,     \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = _chip ## _ ## _name,      \
+                       .owner = THIS_MODULE,   \
+                       .volt_table = voltages, \
+                       .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+                       .vsel_mask = (mask),    \
+                       .enable_reg = (base) + PFUZE100_MODE_OFFSET,    \
+                       .enable_mask = 0xf,     \
+                       .enable_val = 0x8,      \
+                       .enable_time = 500,     \
+               },      \
+               .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+               .stby_mask = (mask),    \
+               .sw_reg = true,         \
+       }
 
 #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)  {       \
        .desc = {       \
@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3000_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
        PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
-       PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
        PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3001_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
-       PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
        PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
index 53a64d8..7f5c318 100644 (file)
@@ -821,7 +821,7 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
 static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
        { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
        { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
-       { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
+       { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
        { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
        {}
 };
index a646fc8..13b26a1 100644 (file)
@@ -8,6 +8,7 @@
  *            Eric Farman <farman@linux.ibm.com>
  */
 
+#include <linux/slab.h>
 #include <linux/vfio.h>
 #include "vfio_ccw_private.h"
 
index 18a0fb7..88e998d 100644 (file)
@@ -4544,9 +4544,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
        int fallback = *(int *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "setaccb");
-       if (cmd->hdr.return_code)
-               return -EIO;
-       qeth_setadpparms_inspect_rc(cmd);
 
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        QETH_CARD_TEXT_(card, 2, "rc=%d",
@@ -4556,7 +4553,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
                                 access_ctrl_req->subcmd_code, CARD_DEVID(card),
                                 cmd->data.setadapterparms.hdr.return_code);
-       switch (cmd->data.setadapterparms.hdr.return_code) {
+       switch (qeth_setadpparms_inspect_rc(cmd)) {
        case SET_ACCESS_CTRL_RC_SUCCESS:
                if (card->options.isolation == ISOLATION_MODE_NONE) {
                        dev_info(&card->gdev->dev,
@@ -6840,9 +6837,11 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features)
 {
+       struct qeth_card *card = dev->ml_priv;
+
        /* Traffic with local next-hop is not eligible for some offloads: */
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               struct qeth_card *card = dev->ml_priv;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           card->options.isolation != ISOLATION_MODE_FWD) {
                netdev_features_t restricted = 0;
 
                if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
index db320da..79f6e8f 100644 (file)
@@ -577,7 +577,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
                                   ZFCP_STATUS_ERP_TIMEDOUT)) {
                        req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
                        zfcp_dbf_rec_run("erscf_1", act);
-                       req->erp_action = NULL;
+                       /* lock-free concurrent access with
+                        * zfcp_erp_timeout_handler()
+                        */
+                       WRITE_ONCE(req->erp_action, NULL);
                }
                if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
                        zfcp_dbf_rec_run("erscf_2", act);
@@ -613,8 +616,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
 void zfcp_erp_timeout_handler(struct timer_list *t)
 {
        struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
-       struct zfcp_erp_action *act = fsf_req->erp_action;
+       struct zfcp_erp_action *act;
 
+       if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+               return;
+       /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+       act = READ_ONCE(fsf_req->erp_action);
+       if (!act)
+               return;
        zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
 }
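
The comments added above document a WRITE_ONCE()/READ_ONCE() pairing: the strategy path publishes the cleared pointer and the timer callback loads it exactly once before dereferencing it, so the compiler can neither tear the accesses nor re-read the pointer between the NULL check and the use. The shape of the pattern in isolation, using the field names from the hunks above:

  /* writer, zfcp_erp_strategy_check_fsfreq(): dismiss and publish */
  req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
  WRITE_ONCE(req->erp_action, NULL);

  /* reader, zfcp_erp_timeout_handler(): one load, then act on the snapshot */
  act = READ_ONCE(fsf_req->erp_action);
  if (!act)
          return;
  zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);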
 
index 773c45a..278d15f 100644 (file)
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
        lockdep_assert_held(&lport->disc.disc_mutex);
 
        rdata = fc_rport_lookup(lport, port_id);
-       if (rdata)
+       if (rdata) {
+               kref_put(&rdata->kref, fc_rport_destroy);
                return rdata;
+       }
 
        if (lport->rport_priv_size > 0)
                rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
        fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-       kref_get(&rdata->kref);
-       if (rdata->event == RPORT_EV_NONE &&
-           !queue_work(rport_event_queue, &rdata->event_work))
-               kref_put(&rdata->kref, fc_rport_destroy);
+       if (rdata->event == RPORT_EV_NONE) {
+               kref_get(&rdata->kref);
+               if (!queue_work(rport_event_queue, &rdata->event_work))
+                       kref_put(&rdata->kref, fc_rport_destroy);
+       }
 
        rdata->event = event;
 }
index e5a64d4..49c8a18 100644 (file)
@@ -2629,7 +2629,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
                        "iscsi_q_%d", shost->host_no);
                ihost->workq = alloc_workqueue("%s",
                        WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-                       2, ihost->workq_name);
+                       1, ihost->workq_name);
                if (!ihost->workq)
                        goto free_host;
        }
index 69a5249..6637f84 100644 (file)
@@ -11878,7 +11878,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        lpfc_sli4_xri_exchange_busy_wait(phba);
 
        /* per-phba callback de-registration for hotplug event */
-       lpfc_cpuhp_remove(phba);
+       if (phba->pport)
+               lpfc_cpuhp_remove(phba);
 
        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);
index 319f241..fcf03f7 100644 (file)
@@ -3739,10 +3739,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp)
        if (instance->mask_interrupts)
                return IRQ_NONE;
 
-#if defined(ENABLE_IRQ_POLL)
        if (irq_context->irq_poll_scheduled)
                return IRQ_HANDLED;
-#endif
 
        if (!instance->msix_vectors) {
                mfiStatus = instance->instancet->clear_intr(instance);
index 62e5528..983e568 100644 (file)
@@ -3145,19 +3145,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
        if (!ioc->is_warpdrive) {
                ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
                        __func__);
-               goto out;
+               return 0;
        }
        /* pci_access_mutex lock acquired by sysfs show path */
        mutex_lock(&ioc->pci_access_mutex);
-       if (ioc->pci_error_recovery || ioc->remove_host) {
-               mutex_unlock(&ioc->pci_access_mutex);
-               return 0;
-       }
+       if (ioc->pci_error_recovery || ioc->remove_host)
+               goto out;
 
        /* allocate upto GPIOVal 36 entries */
        sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
        io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
        if (!io_unit_pg3) {
+               rc = -ENOMEM;
                ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
                        __func__, sz);
                goto out;
@@ -3167,6 +3166,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
            0) {
                ioc_err(ioc, "%s: failed reading iounit_pg3\n",
                        __func__);
+               rc = -EINVAL;
                goto out;
        }
 
@@ -3174,12 +3174,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
                ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
                        __func__, ioc_status);
+               rc = -EINVAL;
                goto out;
        }
 
        if (io_unit_pg3->GPIOCount < 25) {
                ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
                        __func__, io_unit_pg3->GPIOCount);
+               rc = -EINVAL;
                goto out;
        }
 
index 42c3ad2..df670fb 100644 (file)
@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                                qla2x00_clear_loop_id(fcport);
                                fcport->flags |= FCF_FABRIC_DEVICE;
                        } else if (fcport->d_id.b24 != rp->id.b24 ||
-                               fcport->scan_needed) {
+                                  (fcport->scan_needed &&
+                                   fcport->port_type != FCT_INITIATOR &&
+                                   fcport->port_type != FCT_NVME_INITIATOR)) {
                                qlt_schedule_sess_for_deletion(fcport);
                        }
                        fcport->d_id.b24 = rp->id.b24;
index 4576d3a..2436a17 100644 (file)
@@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
                        break;
                }
 
-               if (NVME_TARGET(vha->hw, fcport)) {
+               if (found && NVME_TARGET(vha->hw, fcport)) {
                        if (fcport->disc_state == DSC_DELETE_PEND) {
                                qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                                vha->fcport_count--;
index d66d47a..fa695a4 100644 (file)
@@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+               fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
+               fd->status = NVME_SC_INTERNAL;
        }
-       fd->status = 0;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
        fd->done(fd);
index eed3102..ba84244 100644 (file)
@@ -239,6 +239,7 @@ static struct {
        {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
                BLIST_INQUIRY_36},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
index 42f0550..6f41e4b 100644 (file)
@@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
        {"LSI", "INF-01-00",            "rdac", },
        {"ENGENIO", "INF-01-00",        "rdac", },
        {"LENOVO", "DE_Series",         "rdac", },
+       {"FUJITSU", "ETERNUS_AHB",      "rdac", },
        {NULL, NULL,                    NULL },
 };
 
index f4cc08e..7ae5024 100644 (file)
@@ -4760,7 +4760,7 @@ static __init int iscsi_transport_init(void)
 
        iscsi_eh_timer_workq = alloc_workqueue("%s",
                        WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-                       2, "iscsi_eh");
+                       1, "iscsi_eh");
        if (!iscsi_eh_timer_workq) {
                err = -ENOMEM;
                goto release_nls;
index f866106..f3d5b1b 100644 (file)
@@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev,                     \
        struct spi_transport_attrs *tp                                  \
                = (struct spi_transport_attrs *)&starget->starget_data; \
                                                                        \
-       if (i->f->set_##field)                                          \
+       if (!i->f->set_##field)                                         \
                return -EINVAL;                                         \
        val = simple_strtoul(buf, NULL, 0);                             \
        if (val > tp->max_##field)                                      \
index 01fc0d2..6f54bd8 100644 (file)
@@ -66,10 +66,12 @@ static const struct meson_gx_package_id {
        { "A113D", 0x25, 0x22, 0xff },
        { "S905D2", 0x28, 0x10, 0xf0 },
        { "S905X2", 0x28, 0x40, 0xf0 },
-       { "S922X", 0x29, 0x40, 0xf0 },
        { "A311D", 0x29, 0x10, 0xf0 },
-       { "S905X3", 0x2b, 0x5, 0xf },
-       { "S905D3", 0x2b, 0xb0, 0xf0 },
+       { "S922X", 0x29, 0x40, 0xf0 },
+       { "S905D3", 0x2b, 0x4, 0xf5 },
+       { "S905X3", 0x2b, 0x5, 0xf5 },
+       { "S905X3", 0x2b, 0x10, 0x3f },
+       { "S905D3", 0x2b, 0x30, 0x3f },
        { "A113L", 0x2c, 0x0, 0xf8 },
 };
 
index fec3d67..01bfea1 100644 (file)
@@ -33,6 +33,9 @@ static int __init imx_soc_device_init(void)
        u32 val;
        int ret;
 
+       if (of_machine_is_compatible("fsl,ls1021a"))
+               return 0;
+
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return -ENOMEM;
index 7b0759a..cc57a38 100644 (file)
@@ -22,6 +22,8 @@
 #define OCOTP_UID_LOW                  0x410
 #define OCOTP_UID_HIGH                 0x420
 
+#define IMX8MP_OCOTP_UID_OFFSET                0x10
+
 /* Same as ANADIG_DIGPROG_IMX7D */
 #define ANADIG_DIGPROG_IMX8MM  0x800
 
@@ -87,6 +89,8 @@ static void __init imx8mm_soc_uid(void)
 {
        void __iomem *ocotp_base;
        struct device_node *np;
+       u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+                    IMX8MP_OCOTP_UID_OFFSET : 0;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
        if (!np)
@@ -95,9 +99,9 @@ static void __init imx8mm_soc_uid(void)
        ocotp_base = of_iomap(np, 0);
        WARN_ON(!ocotp_base);
 
-       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
        soc_uid <<= 32;
-       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
 
        iounmap(ocotp_base);
        of_node_put(np);
@@ -146,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
        .soc_revision = imx8mm_soc_revision,
 };
 
-static const struct of_device_id imx8_soc_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
        { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
        { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
        { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
index 96c6f77..c9b3f9e 100644 (file)
@@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
                goto exit;
 
        /* wait for the status to be set */
-       ret = readl_relaxed_poll_timeout(reset->prm->base +
-                                        reset->prm->data->rstst,
-                                        v, v & BIT(st_bit), 1,
-                                        OMAP_RESET_MAX_WAIT);
+       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+                                                reset->prm->data->rstst,
+                                                v, v & BIT(st_bit), 1,
+                                                OMAP_RESET_MAX_WAIT);
        if (ret)
                pr_err("%s: timedout waiting for %s:%lu\n", __func__,
                       reset->prm->data->name, id);
index 4cfdd07..c742274 100644 (file)
@@ -930,8 +930,9 @@ static int intel_create_dai(struct sdw_cdns *cdns,
 
         /* TODO: Read supported rates/formats from hardware */
        for (i = off; i < (off + num); i++) {
-               dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d",
-                                        cdns->instance, i);
+               dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
+                                             "SDW%d Pin%d",
+                                             cdns->instance, i);
                if (!dais[i].name)
                        return -ENOMEM;
 
index a35face..91c6aff 100644 (file)
@@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
                return;
 
        if (dma->chan_tx) {
-               dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
-                                dma_bufsize, DMA_TO_DEVICE);
+               dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+                                 dma->tx_dma_buf, dma->tx_dma_phys);
                dma_release_channel(dma->chan_tx);
        }
 
        if (dma->chan_rx) {
-               dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
-                                dma_bufsize, DMA_FROM_DEVICE);
+               dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+                                 dma->rx_dma_buf, dma->rx_dma_phys);
                dma_release_channel(dma->chan_rx);
        }
 }
@@ -1109,6 +1109,8 @@ static int dspi_suspend(struct device *dev)
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
+       if (dspi->irq)
+               disable_irq(dspi->irq);
        spi_controller_suspend(ctlr);
        clk_disable_unprepare(dspi->clk);
 
@@ -1129,6 +1131,8 @@ static int dspi_resume(struct device *dev)
        if (ret)
                return ret;
        spi_controller_resume(ctlr);
+       if (dspi->irq)
+               enable_irq(dspi->irq);
 
        return 0;
 }
@@ -1385,22 +1389,22 @@ static int dspi_probe(struct platform_device *pdev)
                goto poll_mode;
        }
 
-       ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
-                              IRQF_SHARED, pdev->name, dspi);
+       init_completion(&dspi->xfer_done);
+
+       ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+                                  IRQF_SHARED, pdev->name, dspi);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
                goto out_clk_put;
        }
 
-       init_completion(&dspi->xfer_done);
-
 poll_mode:
 
        if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
                ret = dspi_request_dma(dspi, res->start);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't get dma channels\n");
-                       goto out_clk_put;
+                       goto out_free_irq;
                }
        }
 
@@ -1415,11 +1419,14 @@ poll_mode:
        ret = spi_register_controller(ctlr);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
-               goto out_clk_put;
+               goto out_free_irq;
        }
 
        return ret;
 
+out_free_irq:
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
 out_clk_put:
        clk_disable_unprepare(dspi->clk);
 out_ctlr_put:
@@ -1434,18 +1441,8 @@ static int dspi_remove(struct platform_device *pdev)
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
        /* Disconnect from the SPI framework */
-       dspi_release_dma(dspi);
-       clk_disable_unprepare(dspi->clk);
        spi_unregister_controller(dspi->ctlr);
 
-       return 0;
-}
-
-static void dspi_shutdown(struct platform_device *pdev)
-{
-       struct spi_controller *ctlr = platform_get_drvdata(pdev);
-       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
-
        /* Disable RX and TX */
        regmap_update_bits(dspi->regmap, SPI_MCR,
                           SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
@@ -1455,8 +1452,16 @@ static void dspi_shutdown(struct platform_device *pdev)
        regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
 
        dspi_release_dma(dspi);
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
        clk_disable_unprepare(dspi->clk);
-       spi_unregister_controller(dspi->ctlr);
+
+       return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+       dspi_remove(pdev);
 }
 
 static struct platform_driver fsl_dspi_driver = {
index 6783e12..a556795 100644 (file)
@@ -36,7 +36,6 @@
 #define SPI_CFG0_SCK_LOW_OFFSET           8
 #define SPI_CFG0_CS_HOLD_OFFSET           16
 #define SPI_CFG0_CS_SETUP_OFFSET          24
-#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET    16
 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16
 
@@ -48,6 +47,8 @@
 #define SPI_CFG1_CS_IDLE_MASK             0xff
 #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
+#define SPI_CFG2_SCK_HIGH_OFFSET          0
+#define SPI_CFG2_SCK_LOW_OFFSET           16
 
 #define SPI_CMD_ACT                  BIT(0)
 #define SPI_CMD_RESUME               BIT(1)
@@ -283,7 +284,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 static void mtk_spi_prepare_transfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
 {
-       u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
+       u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
        spi_clk_hz = clk_get_rate(mdata->spi_clk);
@@ -296,18 +297,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
        cs_time = sck_time * 2;
 
        if (mdata->dev_comp->enhance_timing) {
+               reg_val = (((sck_time - 1) & 0xffff)
+                          << SPI_CFG2_SCK_HIGH_OFFSET);
                reg_val |= (((sck_time - 1) & 0xffff)
-                          << SPI_CFG0_SCK_HIGH_OFFSET);
-               reg_val |= (((sck_time - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
+                          << SPI_CFG2_SCK_LOW_OFFSET);
                writel(reg_val, mdata->base + SPI_CFG2_REG);
-               reg_val |= (((cs_time - 1) & 0xffff)
+               reg_val = (((cs_time - 1) & 0xffff)
                           << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
                reg_val |= (((cs_time - 1) & 0xffff)
                           << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
                writel(reg_val, mdata->base + SPI_CFG0_REG);
        } else {
-               reg_val |= (((sck_time - 1) & 0xff)
+               reg_val = (((sck_time - 1) & 0xff)
                           << SPI_CFG0_SCK_HIGH_OFFSET);
                reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
                reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
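
The rewrite above starts each register value with a plain assignment instead of OR-ing into an accumulator that still holds bits intended for the previous register, and it gives the enhanced-timing path its own CFG2 shift macros. A standalone example, with made-up timing values and illustrative bit offsets, of how stale bits would otherwise leak from the CFG2 value into the CFG0 value:

  #include <stdio.h>

  int main(void)
  {
          unsigned int sck = 9, cs = 19;          /* made-up (sck_time - 1), (cs_time - 1) */
          unsigned int reg = 0, cfg2, cfg0_buggy, cfg0_fixed;

          /* buggy: one accumulator reused across both register writes */
          reg |= (sck & 0xffff) << 0;             /* SCK high, meant for CFG2 */
          reg |= (sck & 0xffff) << 16;            /* SCK low,  meant for CFG2 */
          cfg2 = reg;
          reg |= (cs & 0xffff) << 0;              /* CS hold,  meant for CFG0 */
          reg |= (cs & 0xffff) << 16;             /* CS setup, meant for CFG0 */
          cfg0_buggy = reg;                       /* still carries the SCK bits */

          /* fixed: each register value starts from a fresh assignment */
          cfg0_fixed = (cs & 0xffff) << 0;
          cfg0_fixed |= (cs & 0xffff) << 16;

          printf("CFG2       = 0x%08x\n", cfg2);        /* 0x00090009 */
          printf("CFG0 buggy = 0x%08x\n", cfg0_buggy);  /* 0x001b001b */
          printf("CFG0 fixed = 0x%08x\n", cfg0_fixed);  /* 0x00130013 */
          return 0;
  }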
index 6721910..0040362 100644 (file)
@@ -1485,6 +1485,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+       /* TGL-H */
+       { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
index 06192c9..cbc2387 100644 (file)
 
 struct rspi_data {
        void __iomem *addr;
-       u32 max_speed_hz;
+       u32 speed_hz;
        struct spi_controller *ctlr;
        struct platform_device *pdev;
        wait_queue_head_t wait;
@@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
-                           2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 
        clksrc = clk_get_rate(rspi->clk);
        while (div < 3) {
-               if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+               if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
                        break;
                div++;
                clksrc /= 2;
        }
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
        rspi->spcmd |= div << 2;
 
@@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set byte access */
@@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
 {
        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
        struct spi_device *spi = msg->spi;
+       const struct spi_transfer *xfer;
        int ret;
 
-       rspi->max_speed_hz = spi->max_speed_hz;
+       /*
+        * As the Bit Rate Register must not be changed while the device is
+        * active, all transfers in a message must use the same bit rate.
+        * In theory, the sequencer could be enabled, and each Command Register
+        * could divide the base bit rate by a different value.
+        * However, most RSPI variants do not have Transfer Data Length
+        * Multiplier Setting Registers, so each sequence step would be limited
+        * to a single word, making this feature unsuitable for large
+        * transfers, which would gain most from it.
+        */
+       rspi->speed_hz = spi->max_speed_hz;
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (xfer->speed_hz < rspi->speed_hz)
+                       rspi->speed_hz = xfer->speed_hz;
+       }
 
        rspi->spcmd = SPCMD_SSLKP;
        if (spi->mode & SPI_CPOL)
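
With the change above, the whole message runs at one bit rate: the lowest speed_hz of any transfer in it, capped by the device's max_speed_hz, which then feeds the SPBR computation from the earlier hunks, spbr = DIV_ROUND_UP(clk, 2 * speed_hz) - 1. A standalone example with made-up clock and transfer speeds, assuming the rate = clk / (2 * (spbr + 1)) relationship implied by that rounding:

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

  int main(void)
  {
          unsigned int xfer_hz[] = { 2000000, 1000000, 4000000 }; /* per-transfer limits */
          unsigned int speed_hz = 5000000;        /* spi->max_speed_hz */
          unsigned long clk = 48000000;           /* clk_get_rate(rspi->clk) */
          unsigned int spbr, i;

          for (i = 0; i < 3; i++)
                  if (xfer_hz[i] < speed_hz)
                          speed_hz = xfer_hz[i];  /* -> 1000000 */

          spbr = DIV_ROUND_UP(clk, 2UL * speed_hz) - 1;   /* -> 23 */
          printf("speed %u Hz, SPBR %u, actual %lu Hz\n",
                 speed_hz, spbr, clk / (2 * (spbr + 1)));
          return 0;
  }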
index 88e6543..bd23c46 100644 (file)
@@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
+       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
 
        /* Start the watchdog to reset system */
        sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
index 3c44bb2..a900962 100644 (file)
@@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
        .exec_op = stm32_qspi_exec_op,
 };
 
-static void stm32_qspi_release(struct stm32_qspi *qspi)
-{
-       pm_runtime_get_sync(qspi->dev);
-       /* disable qspi */
-       writel_relaxed(0, qspi->io_base + QSPI_CR);
-       stm32_qspi_dma_free(qspi);
-       mutex_destroy(&qspi->lock);
-       pm_runtime_put_noidle(qspi->dev);
-       pm_runtime_disable(qspi->dev);
-       pm_runtime_set_suspended(qspi->dev);
-       pm_runtime_dont_use_autosuspend(qspi->dev);
-       clk_disable_unprepare(qspi->clk);
-}
-
 static int stm32_qspi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        if (IS_ERR(rstc)) {
                ret = PTR_ERR(rstc);
                if (ret == -EPROBE_DEFER)
-                       goto err_qspi_release;
+                       goto err_clk_disable;
        } else {
                reset_control_assert(rstc);
                udelay(2);
@@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, qspi);
        ret = stm32_qspi_dma_setup(qspi);
        if (ret)
-               goto err_qspi_release;
+               goto err_dma_free;
 
        mutex_init(&qspi->lock);
 
@@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 
        ret = devm_spi_register_master(dev, ctrl);
        if (ret)
-               goto err_qspi_release;
+               goto err_pm_runtime_free;
 
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
        return 0;
 
-err_qspi_release:
-       stm32_qspi_release(qspi);
+err_pm_runtime_free:
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+       stm32_qspi_dma_free(qspi);
+err_clk_disable:
+       clk_disable_unprepare(qspi->clk);
 err_master_put:
        spi_master_put(qspi->ctrl);
 
@@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev)
 {
        struct stm32_qspi *qspi = platform_get_drvdata(pdev);
 
-       stm32_qspi_release(qspi);
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       stm32_qspi_dma_free(qspi);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+       clk_disable_unprepare(qspi->clk);
 
        return 0;
 }
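
After the change above, the remove path is the single place that tears everything down, while the probe error path unwinds only what has already been set up, one goto label per step in reverse order. A small runnable sketch of that label ordering, with stub functions standing in for the clock, DMA and registration steps:

  #include <stdio.h>

  static int  clk_on(void)      { puts("clk enabled");    return 0; }
  static int  dma_setup(void)   { puts("dma ready");      return 0; }
  static int  do_register(void) { puts("register: fail"); return -1; }
  static void dma_free(void)    { puts("dma freed"); }
  static void clk_off(void)     { puts("clk disabled"); }

  /* each label undoes exactly one earlier step; later failures fall through
   * the earlier labels, so teardown always runs in reverse order of setup */
  static int example_probe(void)
  {
          int ret;

          ret = clk_on();
          if (ret)
                  return ret;

          ret = dma_setup();
          if (ret)
                  goto err_clk_off;

          ret = do_register();
          if (ret)
                  goto err_dma_free;

          return 0;

  err_dma_free:
          dma_free();
  err_clk_off:
          clk_off();
          return ret;
  }

  int main(void) { return example_probe() ? 1 : 0; }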
index ecea155..fa11cc0 100644 (file)
@@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
                                  struct spi_transfer *tfr)
 {
        struct sun6i_spi *sspi = spi_master_get_devdata(master);
-       unsigned int mclk_rate, div, timeout;
+       unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
        unsigned int start, end, tx_time;
        unsigned int trig_level;
        unsigned int tx_len = 0;
@@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
         * First try CDR2, and if we can't reach the expected
         * frequency, fall back to CDR1.
         */
-       div = mclk_rate / (2 * tfr->speed_hz);
-       if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
-               if (div > 0)
-                       div--;
-
-               reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+       div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
+       div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
+       if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+               reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
        } else {
-               div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
+               div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
                reg = SUN6I_CLK_CTL_CDR1(div);
        }
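
The new divider selection rounds up at both stages, so the programmed rate can only undershoot the request, whereas the old integer division could round down and overclock the bus. A standalone worked example with made-up numbers, assuming CDR2 divides the module clock by 2 * (value + 1):

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

  int main(void)
  {
          unsigned int mclk = 24000000, speed = 7000000;  /* made-up clock and request */

          unsigned int div_cdr1 = DIV_ROUND_UP(mclk, speed);      /* 4 */
          unsigned int div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);      /* 2 */
          unsigned int new_rate = mclk / (2 * div_cdr2);          /* 6 MHz, <= request */

          unsigned int old_div = mclk / (2 * speed);              /* 1 */
          unsigned int old_val = old_div ? old_div - 1 : 0;       /* 0 */
          unsigned int old_rate = mclk / (2 * (old_val + 1));     /* 12 MHz, overshoots */

          printf("requested %u Hz: new %u Hz, old %u Hz\n", speed, new_rate, old_rate);
          return 0;
  }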
 
index d753df7..59e0767 100644 (file)
@@ -609,15 +609,20 @@ err_find_dev:
 static int spidev_release(struct inode *inode, struct file *filp)
 {
        struct spidev_data      *spidev;
+       int                     dofree;
 
        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;
 
+       spin_lock_irq(&spidev->spi_lock);
+       /* ... after we unbound from the underlying device? */
+       dofree = (spidev->spi == NULL);
+       spin_unlock_irq(&spidev->spi_lock);
+
        /* last close? */
        spidev->users--;
        if (!spidev->users) {
-               int             dofree;
 
                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;
@@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;
 
-               spin_lock_irq(&spidev->spi_lock);
-               if (spidev->spi)
-                       spidev->speed_hz = spidev->spi->max_speed_hz;
-
-               /* ... after we unbound from the underlying device? */
-               dofree = (spidev->spi == NULL);
-               spin_unlock_irq(&spidev->spi_lock);
-
                if (dofree)
                        kfree(spidev);
+               else
+                       spidev->speed_hz = spidev->spi->max_speed_hz;
        }
 #ifdef CONFIG_SPI_SLAVE
-       spi_slave_abort(spidev->spi);
+       if (!dofree)
+               spi_slave_abort(spidev->spi);
 #endif
        mutex_unlock(&device_list_lock);
 
@@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi)
 {
        struct spidev_data      *spidev = spi_get_drvdata(spi);
 
+       /* prevent new opens */
+       mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);
 
-       /* prevent new opens */
-       mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
index 45ad4ba..689acd6 100644 (file)
@@ -456,9 +456,9 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
        unsigned int lo_mask = data[5] << shift;
        unsigned int chan_mask = hi_mask | lo_mask;
        unsigned int old_mask = (1 << shift) - 1;
-       unsigned int pm = devpriv->pm[trig] & old_mask;
-       unsigned int pt = devpriv->pt[trig] & old_mask;
-       unsigned int pp = devpriv->pp[trig] & old_mask;
+       unsigned int pm;
+       unsigned int pt;
+       unsigned int pp;
 
        if (trig > 1) {
                dev_dbg(dev->class_dev,
@@ -471,6 +471,10 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
                return -EINVAL;
        }
 
+       pm = devpriv->pm[trig] & old_mask;
+       pt = devpriv->pt[trig] & old_mask;
+       pp = devpriv->pp[trig] & old_mask;
+
        switch (data[2]) {
        case COMEDI_DIGITAL_TRIG_DISABLE:
                /* clear trigger configuration */
index 69bcd17..a3ea7ce 100644 (file)
@@ -1824,12 +1824,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
        pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
        if (!pIE)
                return _FAIL;
+       if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+               return _FAIL;
 
        memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
        supportRateNum = ie_len;
 
        pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
-       if (pIE)
+       if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
                memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
 
        return _SUCCESS;
index 893b67f..5110f9b 100644 (file)
@@ -240,7 +240,7 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 }
 
 int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
-            int chan_start_idx, int chan_num)
+            int chan_start_idx, int chan_num, int *timeout)
 {
        int ret, i;
        struct hif_msg *hif;
@@ -289,11 +289,13 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
        tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
        tmo_chan_fg *= body->num_of_probe_requests;
        tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
+       if (timeout)
+               *timeout = usecs_to_jiffies(tmo);
 
        wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
        ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
        kfree(hif);
-       return ret ? ret : usecs_to_jiffies(tmo);
+       return ret;
 }
 
 int hif_stop_scan(struct wfx_vif *wvif)
index e9eca93..e1da28a 100644 (file)
@@ -42,7 +42,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
                  void *buf, size_t buf_size);
 int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
-            int chan_start, int chan_num);
+            int chan_start, int chan_num, int *timeout);
 int hif_stop_scan(struct wfx_vif *wvif);
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
             struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
index 3248ece..93ea2b7 100644 (file)
@@ -246,7 +246,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                sorted_queues[i] = &wdev->tx_queue[i];
                for (j = i; j > 0; j--)
-                       if (atomic_read(&sorted_queues[j]->pending_frames) >
+                       if (atomic_read(&sorted_queues[j]->pending_frames) <
                            atomic_read(&sorted_queues[j - 1]->pending_frames))
                                swap(sorted_queues[j - 1], sorted_queues[j]);
        }
@@ -291,15 +291,12 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
 
        if (atomic_read(&wdev->tx_lock))
                return NULL;
-
-       for (;;) {
-               skb = wfx_tx_queues_get_skb(wdev);
-               if (!skb)
-                       return NULL;
-               skb_queue_tail(&wdev->tx_pending, skb);
-               wake_up(&wdev->tx_dequeue);
-               tx_priv = wfx_skb_tx_priv(skb);
-               tx_priv->xmit_timestamp = ktime_get();
-               return (struct hif_msg *)skb->data;
-       }
+       skb = wfx_tx_queues_get_skb(wdev);
+       if (!skb)
+               return NULL;
+       skb_queue_tail(&wdev->tx_pending, skb);
+       wake_up(&wdev->tx_dequeue);
+       tx_priv = wfx_skb_tx_priv(skb);
+       tx_priv->xmit_timestamp = ktime_get();
+       return (struct hif_msg *)skb->data;
 }
index 57ea999..e9de197 100644 (file)
@@ -56,10 +56,10 @@ static int send_scan_req(struct wfx_vif *wvif,
        wfx_tx_lock_flush(wvif->wdev);
        wvif->scan_abort = false;
        reinit_completion(&wvif->scan_complete);
-       timeout = hif_scan(wvif, req, start_idx, i - start_idx);
-       if (timeout < 0) {
+       ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout);
+       if (ret) {
                wfx_tx_unlock(wvif->wdev);
-               return timeout;
+               return -EIO;
        }
        ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
        if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
index 9e12402..6c0e1b0 100644 (file)
@@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 {
        int i;
 
-       for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
-               if (power > cpufreq_cdev->em->table[i].power)
+       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+               if (power >= cpufreq_cdev->em->table[i].power)
                        break;
        }
 
-       return cpufreq_cdev->em->table[i + 1].frequency;
+       return cpufreq_cdev->em->table[i].frequency;
 }
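
With the fix above, the scan covers the whole table and returns the entry whose power the budget can actually cover, rather than stopping one slot short and indexing table[i + 1]. A standalone sketch of that lookup over a small made-up table, sorted ascending in frequency and power, clamped here to the lowest state when the budget is below every entry:

  #include <stdio.h>

  struct state { unsigned int frequency, power; };

  int main(void)
  {
          struct state table[] = {                /* made-up EM-style table */
                  {  500000,  90 }, { 1000000, 210 }, { 1500000, 400 },
          };
          int max_level = 2;                      /* highest valid index */
          unsigned int budget = 250;              /* made-up power budget */
          int i;

          for (i = max_level; i > 0; i--)         /* stop at 0 to clamp low */
                  if (budget >= table[i].power)
                          break;

          printf("budget %u -> %u kHz\n", budget, table[i].frequency);
          return 0;
  }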
 
 /**
index e761c9b..1b84ea6 100644 (file)
@@ -649,7 +649,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
 {
        struct device_node *np;
-       int ret;
+       int ret = 0;
 
        data->policy = cpufreq_cpu_get(0);
        if (!data->policy) {
@@ -664,11 +664,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
                if (IS_ERR(data->cdev)) {
                        ret = PTR_ERR(data->cdev);
                        cpufreq_cpu_put(data->policy);
-                       return ret;
                }
        }
 
-       return 0;
+       of_node_put(np);
+
+       return ret;
 }
 
 static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
index 0b3a626..12448cc 100644 (file)
@@ -216,11 +216,16 @@ static int int3400_thermal_run_osc(acpi_handle handle,
        acpi_status status;
        int result = 0;
        struct acpi_osc_context context = {
-               .uuid_str = int3400_thermal_uuids[uuid],
+               .uuid_str = NULL,
                .rev = 1,
                .cap.length = 8,
        };
 
+       if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
+               return -EINVAL;
+
+       context.uuid_str = int3400_thermal_uuids[uuid];
+
        buf[OSC_QUERY_DWORD] = 0;
        buf[OSC_SUPPORT_DWORD] = enable;
 
index f86cbb1..ec1d58c 100644 (file)
@@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle,
                                                   THERMAL_TRIP_CHANGED);
                break;
        default:
-               dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
+               dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
                break;
        }
 }
index 76e3060..42c9cd0 100644 (file)
@@ -211,6 +211,9 @@ enum {
 /* The total number of temperature sensors in the MT8183 */
 #define MT8183_NUM_SENSORS     6
 
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES               1
+
 /* The number of sensing points per bank */
 #define MT8183_NUM_SENSORS_PER_ZONE     6
 
@@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
  */
 static const struct mtk_thermal_data mt8183_thermal_data = {
        .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
-       .num_banks = MT8183_NUM_SENSORS_PER_ZONE,
+       .num_banks = MT8183_NUM_ZONES,
        .num_sensors = MT8183_NUM_SENSORS,
        .vts_index = mt8183_vts_index,
        .cali_val = MT8183_CALIBRATION,
@@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
        u32 raw;
 
        for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
-               raw = readl(mt->thermal_base +
-                           conf->msr[conf->bank_data[bank->id].sensors[i]]);
+               raw = readl(mt->thermal_base + conf->msr[i]);
 
                temp = raw_to_mcelsius(mt,
                                       conf->bank_data[bank->id].sensors[i],
@@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
 
        for (i = 0; i < conf->bank_data[num].num_sensors; i++)
                writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
-                      mt->thermal_base +
-                      conf->adcpnp[conf->bank_data[num].sensors[i]]);
+                      mt->thermal_base + conf->adcpnp[i]);
 
        writel((1 << conf->bank_data[num].num_sensors) - 1,
               controller_base + TEMP_MONCTL0);
index 8d3e94d..39c4462 100644 (file)
@@ -382,7 +382,7 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -452,7 +452,7 @@ irqreturn_t tsens_critical_irq_thread(int irq, void *data)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_irq_thread(int irq, void *data)
+static irqreturn_t tsens_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -520,7 +520,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(void *_sensor, int low, int high)
 {
        struct tsens_sensor *s = _sensor;
        struct tsens_priv *priv = s->priv;
@@ -557,7 +557,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
        return 0;
 }
 
-int tsens_enable_irq(struct tsens_priv *priv)
+static int tsens_enable_irq(struct tsens_priv *priv)
 {
        int ret;
        int val = tsens_version(priv) > VER_1_X ? 7 : 1;
@@ -570,7 +570,7 @@ int tsens_enable_irq(struct tsens_priv *priv)
        return ret;
 }
 
-void tsens_disable_irq(struct tsens_priv *priv)
+static void tsens_disable_irq(struct tsens_priv *priv)
 {
        regmap_field_write(priv->rf[INT_EN], 0);
 }
index 58fe7c1..c48c5e9 100644 (file)
@@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
 {
        struct rcar_gen3_thermal_tsc *tsc = devdata;
        int mcelsius, val;
-       u32 reg;
+       int reg;
 
        /* Read register and convert to mili Celsius */
        reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
index a340374..4cde70d 100644 (file)
@@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
 
        thm->var_data = pdata;
        thm->base = devm_platform_ioremap_resource(pdev, 0);
-       if (!thm->base)
-               return -ENOMEM;
+       if (IS_ERR(thm->base))
+               return PTR_ERR(thm->base);
 
        thm->nr_sensors = of_get_child_count(np);
        if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
index dbe90bc..c144ca9 100644 (file)
@@ -913,21 +913,21 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
-                               &tunnel->dst_port, "USB3 Up");
+                               &tunnel->dst_port, "USB3 Down");
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
-       tunnel->paths[TB_USB3_PATH_UP] = path;
-       tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+       tunnel->paths[TB_USB3_PATH_DOWN] = path;
+       tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
 
        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
-                               "USB3 Down");
+                               "USB3 Up");
        if (!path)
                goto err_deactivate;
-       tunnel->paths[TB_USB3_PATH_DOWN] = path;
-       tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+       tunnel->paths[TB_USB3_PATH_UP] = path;
+       tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
 
        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
index a04f74d..4df47d0 100644 (file)
@@ -1215,7 +1215,12 @@ static int cpm_uart_init_port(struct device_node *np,
 
                pinfo->gpios[i] = NULL;
 
-               gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+               gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS);
+
+               if (IS_ERR(gpiod)) {
+                       ret = PTR_ERR(gpiod);
+                       goto out_irq;
+               }
 
                if (gpiod) {
                        if (i == GPIO_RTS || i == GPIO_DTR)
@@ -1237,6 +1242,8 @@ static int cpm_uart_init_port(struct device_node *np,
 
        return cpm_uart_request_port(&pinfo->port);
 
+out_irq:
+       irq_dispose_mapping(pinfo->port.irq);
 out_pram:
        cpm_uart_unmap_pram(pinfo, pram);
 out_mem:
index 5022447..6004c0c 100644 (file)
@@ -50,7 +50,7 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
         * I/O utilities that messages sent to the console will automatically
         * be displayed on the dbg_io.
         */
-       dbg_io_ops->is_console = true;
+       dbg_io_ops->cons = co;
 
        return 0;
 }
index 4139698..84ffede 100644 (file)
@@ -45,7 +45,6 @@ static struct platform_device *kgdboc_pdev;
 
 #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
 static struct kgdb_io          kgdboc_earlycon_io_ops;
-static struct console          *earlycon;
 static int                      (*earlycon_orig_exit)(struct console *con);
 #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
 
@@ -145,7 +144,7 @@ static void kgdboc_unregister_kbd(void)
 #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
 static void cleanup_earlycon(void)
 {
-       if (earlycon)
+       if (kgdboc_earlycon_io_ops.cons)
                kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
 }
 #else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
@@ -178,7 +177,7 @@ static int configure_kgdboc(void)
                goto noconfig;
        }
 
-       kgdboc_io_ops.is_console = 0;
+       kgdboc_io_ops.cons = NULL;
        kgdb_tty_driver = NULL;
 
        kgdboc_use_kms = 0;
@@ -198,7 +197,7 @@ static int configure_kgdboc(void)
                int idx;
                if (cons->device && cons->device(cons, &idx) == p &&
                    idx == tty_line) {
-                       kgdboc_io_ops.is_console = 1;
+                       kgdboc_io_ops.cons = cons;
                        break;
                }
        }
@@ -433,7 +432,8 @@ static int kgdboc_earlycon_get_char(void)
 {
        char c;
 
-       if (!earlycon->read(earlycon, &c, 1))
+       if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons,
+                                              &c, 1))
                return NO_POLL_CHAR;
 
        return c;
@@ -441,7 +441,8 @@ static int kgdboc_earlycon_get_char(void)
 
 static void kgdboc_earlycon_put_char(u8 chr)
 {
-       earlycon->write(earlycon, &chr, 1);
+       kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr,
+                                          1);
 }
 
 static void kgdboc_earlycon_pre_exp_handler(void)
@@ -461,7 +462,7 @@ static void kgdboc_earlycon_pre_exp_handler(void)
         * boot if we detect this case.
         */
        for_each_console(con)
-               if (con == earlycon)
+               if (con == kgdboc_earlycon_io_ops.cons)
                        return;
 
        already_warned = true;
@@ -484,25 +485,25 @@ static int kgdboc_earlycon_deferred_exit(struct console *con)
 
 static void kgdboc_earlycon_deinit(void)
 {
-       if (!earlycon)
+       if (!kgdboc_earlycon_io_ops.cons)
                return;
 
-       if (earlycon->exit == kgdboc_earlycon_deferred_exit)
+       if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit)
                /*
                 * kgdboc_earlycon is exiting but original boot console exit
                 * was never called (AKA kgdboc_earlycon_deferred_exit()
                 * didn't ever run).  Undo our trap.
                 */
-               earlycon->exit = earlycon_orig_exit;
-       else if (earlycon->exit)
+               kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit;
+       else if (kgdboc_earlycon_io_ops.cons->exit)
                /*
                 * We skipped calling the exit() routine so we could try to
                 * keep using the boot console even after it went away.  We're
                 * finally done so call the function now.
                 */
-               earlycon->exit(earlycon);
+               kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons);
 
-       earlycon = NULL;
+       kgdboc_earlycon_io_ops.cons = NULL;
 }
 
 static struct kgdb_io kgdboc_earlycon_io_ops = {
@@ -511,7 +512,6 @@ static struct kgdb_io kgdboc_earlycon_io_ops = {
        .write_char             = kgdboc_earlycon_put_char,
        .pre_exception          = kgdboc_earlycon_pre_exp_handler,
        .deinit                 = kgdboc_earlycon_deinit,
-       .is_console             = true,
 };
 
 #define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
@@ -557,10 +557,10 @@ static int __init kgdboc_earlycon_init(char *opt)
                goto unlock;
        }
 
-       earlycon = con;
+       kgdboc_earlycon_io_ops.cons = con;
        pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
        if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
-               earlycon = NULL;
+               kgdboc_earlycon_io_ops.cons = NULL;
                pr_info("Failed to register kgdb with earlycon\n");
        } else {
                /* Trap exit so we can keep earlycon longer if needed. */
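
The kgdboc hunks above drop the boolean is_console flag and the file-scope earlycon pointer in favour of a struct console *cons field carried inside the kgdb I/O ops, so the polled console travels with the ops that use it. Below is a minimal sketch of that shape; the struct is heavily cut down and every name except .cons, ->read and ->write is illustrative, not the upstream layout.

#include <linux/console.h>
#include <linux/types.h>

/* Sketch only: a cut-down kgdb-style ops struct that owns its console. */
struct demo_dbg_io {
        const char      *name;
        int             (*read_char)(void);
        void            (*write_char)(u8 chr);
        struct console  *cons;          /* replaces the old bool is_console */
};

static struct demo_dbg_io demo_io_ops;

static int demo_get_char(void)
{
        char c;

        /* The early console is reached through the ops, not a global. */
        if (!demo_io_ops.cons->read(demo_io_ops.cons, &c, 1))
                return -1;      /* stand-in for NO_POLL_CHAR */

        return c;
}
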
index b4f835e..b784323 100644 (file)
@@ -1698,21 +1698,21 @@ static int mxs_auart_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
-               goto out_disable_clks;
+               goto out_iounmap;
        }
 
        s->port.irq = irq;
        ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0,
                               dev_name(&pdev->dev), s);
        if (ret)
-               goto out_disable_clks;
+               goto out_iounmap;
 
        platform_set_drvdata(pdev, s);
 
        ret = mxs_auart_init_gpios(s, &pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
-               goto out_disable_clks;
+               goto out_iounmap;
        }
 
        /*
@@ -1720,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
         */
        ret = mxs_auart_request_gpio_irq(s);
        if (ret)
-               goto out_disable_clks;
+               goto out_iounmap;
 
        auart_port[s->port.line] = s;
 
@@ -1746,6 +1746,9 @@ out_free_qpio_irq:
        mxs_auart_free_gpio_irq(s);
        auart_port[pdev->id] = NULL;
 
+out_iounmap:
+       iounmap(s->port.membase);
+
 out_disable_clks:
        if (is_asm9260_auart(s)) {
                clk_disable_unprepare(s->clk);
@@ -1761,6 +1764,7 @@ static int mxs_auart_remove(struct platform_device *pdev)
        uart_remove_one_port(&auart_driver, &s->port);
        auart_port[pdev->id] = NULL;
        mxs_auart_free_gpio_irq(s);
+       iounmap(s->port.membase);
        if (is_asm9260_auart(s)) {
                clk_disable_unprepare(s->clk);
                clk_disable_unprepare(s->clk_ahb);
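
The mxs_auart hunks retarget every post-ioremap failure in probe to a new out_iounmap label and add the matching iounmap() to remove(), so the register mapping is always released. A generic sketch of the same unwind pattern follows; the resource handling is invented for illustration and is not the real driver.

#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: manually unwind an ioremap() when a later probe step fails. */
static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;
        int irq, ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto out_iounmap;       /* every failure after the mapping unwinds it */
        }

        /* ... request the irq, register the port, and so on ... */
        return 0;

out_iounmap:
        iounmap(base);
        return ret;
}
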
index 57840cf..5f3daab 100644 (file)
@@ -41,8 +41,6 @@ static struct lock_class_key port_lock_key;
 
 #define HIGH_BITS_OFFSET       ((sizeof(long)-sizeof(int))*8)
 
-#define SYSRQ_TIMEOUT  (HZ * 5)
-
 static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
                                        struct ktermios *old_termios);
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -1916,6 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
        return uart_console(port) && (port->cons->flags & CON_ENABLED);
 }
 
+static void __uart_port_spin_lock_init(struct uart_port *port)
+{
+       spin_lock_init(&port->lock);
+       lockdep_set_class(&port->lock, &port_lock_key);
+}
+
 /*
  * Ensure that the serial console lock is initialised early.
  * If this port is a console, then the spinlock is already initialised.
@@ -1925,8 +1929,7 @@ static inline void uart_port_spin_lock_init(struct uart_port *port)
        if (uart_console(port))
                return;
 
-       spin_lock_init(&port->lock);
-       lockdep_set_class(&port->lock, &port_lock_key);
+       __uart_port_spin_lock_init(port);
 }
 
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
@@ -2373,6 +2376,13 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
                uart_change_pm(state, UART_PM_STATE_ON);
 
                /*
+                * If this driver supports console, and it hasn't been
+                * successfully registered yet, initialise spin lock for it.
+                */
+               if (port->cons && !(port->cons->flags & CON_ENABLED))
+                       __uart_port_spin_lock_init(port);
+
+               /*
                 * Ensure that the modem control lines are de-activated.
                 * keep the DTR setting that is set in uart_set_options()
                 * We probably don't need a spinlock around this, but
@@ -3163,7 +3173,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
  *     Returns false if @ch is out of enabling sequence and should be
  *     handled some other way, true if @ch was consumed.
  */
-static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
 {
        int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
 
@@ -3186,99 +3196,9 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
        port->sysrq = 0;
        return true;
 }
-#else
-static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
-{
-       return false;
-}
+EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq);
 #endif
 
-int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-       if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
-               return 0;
-
-       if (!port->has_sysrq || !port->sysrq)
-               return 0;
-
-       if (ch && time_before(jiffies, port->sysrq)) {
-               if (sysrq_mask()) {
-                       handle_sysrq(ch);
-                       port->sysrq = 0;
-                       return 1;
-               }
-               if (uart_try_toggle_sysrq(port, ch))
-                       return 1;
-       }
-       port->sysrq = 0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
-
-int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
-{
-       if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
-               return 0;
-
-       if (!port->has_sysrq || !port->sysrq)
-               return 0;
-
-       if (ch && time_before(jiffies, port->sysrq)) {
-               if (sysrq_mask()) {
-                       port->sysrq_ch = ch;
-                       port->sysrq = 0;
-                       return 1;
-               }
-               if (uart_try_toggle_sysrq(port, ch))
-                       return 1;
-       }
-       port->sysrq = 0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
-
-void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags)
-__releases(&port->lock)
-{
-       if (port->has_sysrq) {
-               int sysrq_ch = port->sysrq_ch;
-
-               port->sysrq_ch = 0;
-               spin_unlock_irqrestore(&port->lock, flags);
-               if (sysrq_ch)
-                       handle_sysrq(sysrq_ch);
-       } else {
-               spin_unlock_irqrestore(&port->lock, flags);
-       }
-}
-EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
-
-/*
- * We do the SysRQ and SAK checking like this...
- */
-int uart_handle_break(struct uart_port *port)
-{
-       struct uart_state *state = port->state;
-
-       if (port->handle_break)
-               port->handle_break(port);
-
-       if (port->has_sysrq && uart_console(port)) {
-               if (!port->sysrq) {
-                       port->sysrq = jiffies + SYSRQ_TIMEOUT;
-                       return 1;
-               }
-               port->sysrq = 0;
-       }
-
-       if (port->flags & UPF_SAK)
-               do_SAK(state->port.tty);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_break);
-
 EXPORT_SYMBOL(uart_write_wakeup);
 EXPORT_SYMBOL(uart_register_driver);
 EXPORT_SYMBOL(uart_unregister_driver);
@@ -3289,8 +3209,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
 
 /**
  * uart_get_rs485_mode() - retrieve rs485 properties for given uart
- * @dev: uart device
- * @rs485conf: output parameter
+ * @port: uart device's target port
  *
  * This function implements the device tree binding described in
  * Documentation/devicetree/bindings/serial/rs485.txt.
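
Two things happen in the serial core hunks above. The sysrq helpers (uart_handle_sysrq_char, uart_prepare_sysrq_char, uart_unlock_and_check_sysrq, uart_handle_break) disappear from serial_core.c while uart_try_toggle_sysrq loses its static and gains an export; that combination suggests the helper bodies now live as inlines in the serial_core header, which is not part of this excerpt, so treat that as an inference. Separately, __uart_port_spin_lock_init() lets uart_configure_port() initialise the port lock for a console that has not managed to register yet. A sketch of that guard, assuming only the uart_port and console fields used above:

#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

/* Sketch: give the port a fresh lock only if no enabled console owns it. */
static void demo_configure_port_lock(struct uart_port *port)
{
        /*
         * A console that registered successfully already initialised
         * port->lock (and may be holding it); only a console that is not
         * yet enabled still needs the lock set up here.
         */
        if (port->cons && !(port->cons->flags & CON_ENABLED))
                spin_lock_init(&port->lock);
}
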
index e1179e7..204bb68 100644 (file)
@@ -3301,6 +3301,9 @@ static int sci_probe_single(struct platform_device *dev,
                sciport->port.flags |= UPF_HARD_FLOW;
        }
 
+       if (sci_uart_driver.cons->index == sciport->port.line)
+               spin_lock_init(&sciport->port.lock);
+
        ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
        if (ret) {
                sci_cleanup_single(sciport);
index b9d672a..672cfa0 100644 (file)
@@ -1465,7 +1465,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
                cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
                cdns_uart_uart_driver.cons = &cdns_uart_console;
-               cdns_uart_console.index = id;
 #endif
 
                rc = uart_register_driver(&cdns_uart_uart_driver);
index ae319ef..b60173b 100644 (file)
@@ -159,9 +159,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
        priv->pdev = pdev;
 
        if (!uioinfo->irq) {
-               ret = platform_get_irq(pdev, 0);
+               ret = platform_get_irq_optional(pdev, 0);
                uioinfo->irq = ret;
-               if (ret == -ENXIO && pdev->dev.of_node)
+               if (ret == -ENXIO)
                        uioinfo->irq = UIO_IRQ_NONE;
                else if (ret == -EPROBE_DEFER)
                        return ret;
index 633c52d..9865750 100644 (file)
@@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
        c67x00_release_urb(c67x00, urb);
        usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
        spin_unlock(&c67x00->lock);
-       usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
+       usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
        spin_lock(&c67x00->lock);
 }
 
index 82645a2..5aa6998 100644 (file)
@@ -37,18 +37,18 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev,
        struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
        struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
 
-       priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr);
-       priv_ep->trb_pool[0].length = TRB_LEN(length);
+       priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+       priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length));
 
        if (zlp) {
-               priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL);
-               priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr);
-               priv_ep->trb_pool[1].length = TRB_LEN(0);
-               priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC |
-                   TRB_TYPE(TRB_NORMAL);
+               priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL));
+               priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+               priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0));
+               priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+                   TRB_TYPE(TRB_NORMAL));
        } else {
-               priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC |
-                   TRB_TYPE(TRB_NORMAL);
+               priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+                   TRB_TYPE(TRB_NORMAL));
                priv_ep->trb_pool[1].control = 0;
        }
 
@@ -264,11 +264,11 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
        case USB_RECIP_INTERFACE:
                return cdns3_ep0_delegate_req(priv_dev, ctrl);
        case USB_RECIP_ENDPOINT:
-               index = cdns3_ep_addr_to_index(ctrl->wIndex);
+               index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
                priv_ep = priv_dev->eps[index];
 
                /* check if endpoint is stalled or stall is pending */
-               cdns3_select_ep(priv_dev, ctrl->wIndex);
+               cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
                if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
                    (priv_ep->flags & EP_STALL_PENDING))
                        usb_status =  BIT(USB_ENDPOINT_HALT);
@@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
                if (!set || (tmode & 0xff) != 0)
                        return -EINVAL;
 
-               switch (tmode >> 8) {
+               tmode >>= 8;
+               switch (tmode) {
                case TEST_J:
                case TEST_K:
                case TEST_SE0_NAK:
@@ -380,10 +381,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
        if (!(ctrl->wIndex & ~USB_DIR_IN))
                return 0;
 
-       index = cdns3_ep_addr_to_index(ctrl->wIndex);
+       index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
        priv_ep = priv_dev->eps[index];
 
-       cdns3_select_ep(priv_dev, ctrl->wIndex);
+       cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
 
        if (set)
                __cdns3_gadget_ep_set_halt(priv_ep);
@@ -444,7 +445,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev,
        if (priv_dev->gadget.state < USB_STATE_ADDRESS)
                return -EINVAL;
 
-       if (ctrl_req->wLength != 6) {
+       if (le16_to_cpu(ctrl_req->wLength) != 6) {
                dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n",
                        ctrl_req->wLength);
                return -EINVAL;
@@ -468,7 +469,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev,
        if (ctrl_req->wIndex || ctrl_req->wLength)
                return -EINVAL;
 
-       priv_dev->isoch_delay = ctrl_req->wValue;
+       priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue);
 
        return 0;
 }
@@ -704,15 +705,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
        int ret = 0;
        u8 zlp = 0;
 
+       spin_lock_irqsave(&priv_dev->lock, flags);
        trace_cdns3_ep0_queue(priv_dev, request);
 
        /* cancel the request if controller receive new SETUP packet. */
-       if (cdns3_check_new_setup(priv_dev))
+       if (cdns3_check_new_setup(priv_dev)) {
+               spin_unlock_irqrestore(&priv_dev->lock, flags);
                return -ECONNRESET;
+       }
 
        /* send STATUS stage. Should be called only for SET_CONFIGURATION */
        if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
-               spin_lock_irqsave(&priv_dev->lock, flags);
                cdns3_select_ep(priv_dev, 0x00);
 
                erdy_sent = !priv_dev->hw_configured_flag;
@@ -737,7 +740,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
                return 0;
        }
 
-       spin_lock_irqsave(&priv_dev->lock, flags);
        if (!list_empty(&priv_ep->pending_req_list)) {
                dev_err(priv_dev->dev,
                        "can't handle multiple requests for ep0\n");
index 8d121e2..0a2a326 100644 (file)
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
                __dynamic_array(char, str, CDNS3_MSG_MAX)
        ),
        TP_fast_assign(
-               __entry->ep_dir = priv_dev->ep0_data_dir;
+               __entry->ep_dir = priv_dev->selected_ep;
                __entry->ep_sts = ep_sts;
        ),
        TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
@@ -404,9 +404,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
        TP_fast_assign(
                __assign_str(name, priv_ep->name);
                __entry->trb = trb;
-               __entry->buffer = trb->buffer;
-               __entry->length = trb->length;
-               __entry->control = trb->control;
+               __entry->buffer = le32_to_cpu(trb->buffer);
+               __entry->length = le32_to_cpu(trb->length);
+               __entry->control = le32_to_cpu(trb->control);
                __entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
                __entry->last_stream_id = priv_ep->last_stream_id;
        ),
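
The cdns3 hunks above wrap every access to wire-format fields — wValue/wIndex/wLength in the SETUP packet and the little-endian TRB words — in le16_to_cpu()/le32_to_cpu()/cpu_to_le32(), so the driver behaves the same on big-endian CPUs and keeps sparse happy. A small sketch of the SETUP-packet side; the function and the printed fields are invented for illustration.

#include <linux/kernel.h>
#include <linux/usb/ch9.h>

/* Sketch: convert SETUP fields to host endianness before using them. */
static void demo_parse_setup(const struct usb_ctrlrequest *ctrl)
{
        u16 windex  = le16_to_cpu(ctrl->wIndex);
        u16 wvalue  = le16_to_cpu(ctrl->wValue);
        u16 wlength = le16_to_cpu(ctrl->wLength);

        /*
         * The raw fields are __le16; indexing or comparing with them
         * directly only happens to work on little-endian machines.
         */
        pr_debug("ep %u, value %#x, length %u\n",
                 windex & USB_ENDPOINT_NUMBER_MASK, wvalue, wlength);
}
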
index 9a7c53d..bb13324 100644 (file)
@@ -1243,6 +1243,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci)
        enable_irq(ci->irq);
 }
 
+/*
+ * Handle the wakeup interrupt triggered by extcon connector
+ * We need to call ci_irq again for extcon since the first
+ * interrupt (wakeup int) only let the controller be out of
+ * low power mode, but not handle any interrupts.
+ */
+static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
+{
+       struct ci_hdrc_cable *cable_id, *cable_vbus;
+       u32 otgsc = hw_read_otgsc(ci, ~0);
+
+       cable_id = &ci->platdata->id_extcon;
+       cable_vbus = &ci->platdata->vbus_extcon;
+
+       if (!IS_ERR(cable_id->edev) && ci->is_otg &&
+               (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
+               ci_irq(ci->irq, ci);
+
+       if (!IS_ERR(cable_vbus->edev) && ci->is_otg &&
+               (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
+               ci_irq(ci->irq, ci);
+}
+
 static int ci_controller_resume(struct device *dev)
 {
        struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1275,6 +1298,7 @@ static int ci_controller_resume(struct device *dev)
                enable_irq(ci->irq);
                if (ci_otg_is_fsm_mode(ci))
                        ci_otg_fsm_wakeup_by_srp(ci);
+               ci_extcon_wakeup_int(ci);
        }
 
        return 0;
index f67088b..d5187b5 100644 (file)
@@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)
 
 static const struct usb_device_id acm_ids[] = {
        /* quirky and broken devices */
+       { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+         .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
        { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
        .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
        { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
index 3e8efe7..e0b7767 100644 (file)
@@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Logitech HD Webcam C270 */
        { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+       /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
 
        /* Logitech ConferenceCam CC3000e */
        { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
index 12b98b4..7faf5f8 100644 (file)
@@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
                                          epnum, 0);
        }
 
-       ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret) {
-               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
-                                          hsotg->ctrl_req);
-               return ret;
-       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
index e571c8a..cb8ddbd 100644 (file)
@@ -342,7 +342,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
 {
        struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
 
-       disable_irq(hsotg->irq);
+       dwc2_disable_global_interrupts(hsotg);
+       synchronize_irq(hsotg->irq);
 }
 
 /**
@@ -575,6 +576,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
                dwc2_lowlevel_hw_disable(hsotg);
 
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+       IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+       /* Postponed adding a new gadget to the udc class driver list */
+       if (hsotg->gadget_enabled) {
+               retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+               if (retval) {
+                       dwc2_hsotg_remove(hsotg);
+                       goto error_init;
+               }
+       }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
        return 0;
 
 error_init:
index 48b68b6..90bb022 100644 (file)
@@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
        .suspend_clk_idx = -1,
 };
 
-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
-       .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
-       .num_clks = 2,
-       .suspend_clk_idx = 1,
-};
-
 static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
        .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
        .num_clks = 4,
@@ -185,9 +179,6 @@ static const struct of_device_id exynos_dwc3_match[] = {
                .compatible = "samsung,exynos5250-dwusb3",
                .data = &exynos5250_drvdata,
        }, {
-               .compatible = "samsung,exynos5420-dwusb3",
-               .data = &exynos5420_drvdata,
-       }, {
                .compatible = "samsung,exynos5433-dwusb3",
                .data = &exynos5433_drvdata,
        }, {
index b673727..139474c 100644 (file)
@@ -38,6 +38,8 @@
 #define PCI_DEVICE_ID_INTEL_ICLLP              0x34ee
 #define PCI_DEVICE_ID_INTEL_EHLLP              0x4b7e
 #define PCI_DEVICE_ID_INTEL_TGPLP              0xa0ee
+#define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
+#define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -206,8 +208,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
        int ret;
 
        ret = pm_runtime_get_sync(&dwc3->dev);
-       if (ret)
+       if (ret) {
+               pm_runtime_put_sync_autosuspend(&dwc3->dev);
                return;
+       }
 
        pm_runtime_mark_last_busy(&dwc3->dev);
        pm_runtime_put_sync_autosuspend(&dwc3->dev);
@@ -356,6 +360,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
          (kernel_ulong_t) &dwc3_pci_amd_properties, },
        {  }    /* Terminating Entry */
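
The dwc3_pci_resume_work hunk fixes a runtime-PM reference leak: pm_runtime_get_sync() increments the device's usage count even when it returns an error (or 1 for an already-active device), so any early return has to drop that reference — here with pm_runtime_put_sync_autosuspend(). A generic sketch of the usual balance, shown with the common "< 0" check rather than the exact test used in the patch:

#include <linux/pm_runtime.h>

/* Sketch: keep the runtime-PM usage count balanced on the error path. */
static int demo_do_work(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* The count was bumped anyway; give it back. */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... touch the now-resumed hardware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
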
index ea0d531..775cf70 100644 (file)
@@ -1058,7 +1058,8 @@ static int __init kgdbdbgp_parse_config(char *str)
                kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
        }
        kgdb_register_io_module(&kgdbdbgp_io_ops);
-       kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+       if (early_dbgp_console.index != -1)
+               kgdbdbgp_io_ops.cons = &early_dbgp_console;
 
        return 0;
 }
index 349deae..e2d7f69 100644 (file)
@@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
 
        /* Copy buffer is full, add it to the play_queue */
        if (audio_buf_size - copy_buf->actual < req->actual) {
+               spin_lock_irq(&audio->lock);
                list_add_tail(&copy_buf->list, &audio->play_queue);
+               spin_unlock_irq(&audio->lock);
                schedule_work(&audio->playback_work);
                copy_buf = f_audio_buffer_alloc(audio_buf_size);
                if (IS_ERR(copy_buf))
index d69f61f..9342a3d 100644 (file)
@@ -676,13 +676,7 @@ static int usba_ep_disable(struct usb_ep *_ep)
 
        if (!ep->ep.desc) {
                spin_unlock_irqrestore(&udc->lock, flags);
-               /* REVISIT because this driver disables endpoints in
-                * reset_all_endpoints() before calling disconnect(),
-                * most gadget drivers would trigger this non-error ...
-                */
-               if (udc->gadget.speed != USB_SPEED_UNKNOWN)
-                       DBG(DBG_ERR, "ep_disable: %s not enabled\n",
-                                       ep->ep.name);
+               DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
                return -EINVAL;
        }
        ep->ep.desc = NULL;
@@ -871,7 +865,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
        u32 status;
 
        DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
-                       ep->ep.name, req);
+                       ep->ep.name, _req);
 
        spin_lock_irqsave(&udc->lock, flags);
 
index 7164ad9..7419889 100644 (file)
@@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
 
        if (num == 0) {
                _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
+               if (!_req)
+                       return -ENOMEM;
+
                buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
-               if (!_req || !buf) {
-                       /* possible _req freed by gr_probe via gr_remove */
+               if (!buf) {
+                       gr_free_request(&ep->ep, _req);
                        return -ENOMEM;
                }
 
index cafde05..80a1b52 100644 (file)
@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
        return 0;
 
 err_create_workqueue:
-       destroy_workqueue(udc->qwork);
+       if (udc->qwork)
+               destroy_workqueue(udc->qwork);
 err_destroy_dma:
        dma_pool_destroy(udc->dtd_pool);
 err_free_dma:
index 58a4d33..119505f 100644 (file)
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_get_string);
 
 /**
  * usb_validate_langid - validate usb language identifiers
- * @lang: usb language identifier
+ * @langid: usb language identifier
  *
  * Returns true for valid language identifier, otherwise false.
  */
index a4e9abc..1a9b757 100644 (file)
@@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
        hcd->rsrc_len = resource_size(res);
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq) {
-               dev_err(&pdev->dev, "Failed to get IRQ\n");
-               err = -ENODEV;
+       if (irq < 0) {
+               err = irq;
                goto fail_io;
        }
 
index 3c3820a..af3c1b9 100644 (file)
@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                ehci_info(ehci, "applying MosChip frame-index workaround\n");
                ehci->frame_index_bug = 1;
                break;
+       case PCI_VENDOR_ID_HUAWEI:
+               /* Synopsys HC bug */
+               if (pdev->device == 0xa239) {
+                       ehci_info(ehci, "applying Synopsys HC workaround\n");
+                       ehci->has_synopsys_hc_bug = 1;
+               }
+               break;
        }
 
        /* optional debug port, normally in the first BAR */
index cff9652..b91d50d 100644 (file)
@@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
        struct resource *mem;
 
        usb_remove_hcd(hcd);
+       iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
index bfbdb3c..4311d4c 100644 (file)
@@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct usb_hcd  *shared_hcd = xhci->shared_hcd;
 
+       pm_runtime_put_noidle(&dev->dev);
+       pm_runtime_disable(&dev->dev);
+
        usb_remove_hcd(shared_hcd);
        xhci->shared_hcd = NULL;
        device_init_wakeup(&dev->dev, false);
@@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
        xhci_mtk_sch_exit(mtk);
        xhci_mtk_clks_disable(mtk);
        xhci_mtk_ldos_disable(mtk);
-       pm_runtime_put_sync(&dev->dev);
-       pm_runtime_disable(&dev->dev);
 
        return 0;
 }
index bee5dec..ed468ee 100644 (file)
@@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                                xhci->devs[slot_id]->out_ctx, ep_index);
 
                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+               ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
@@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
        int             hird, exit_latency;
        int             ret;
 
+       if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+               return -EPERM;
+
        if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
                        !udev->lpm_capable)
                return -EPERM;
@@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
        xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
                        enable ? "enable" : "disable", port_num + 1);
 
-       if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+       if (enable) {
                /* Host supports BESL timeout instead of HIRD */
                if (udev->usb2_hw_lpm_besl_capable) {
                        /* if device doesn't have a preferred BESL value use a
@@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
                        mutex_lock(hcd->bandwidth_mutex);
                        xhci_change_max_exit_latency(xhci, udev, 0);
                        mutex_unlock(hcd->bandwidth_mutex);
+                       readl_poll_timeout(ports[port_num]->addr, pm_val,
+                                          (pm_val & PORT_PLS_MASK) == XDEV_U0,
+                                          100, 10000);
                        return 0;
                }
        }
index 2c6c4f8..c295e8a 100644 (file)
@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
  * 4 - TRB error
  * 5-7 - reserved
  */
-#define EP_STATE_MASK          (0xf)
+#define EP_STATE_MASK          (0x7)
 #define EP_STATE_DISABLED      0
 #define EP_STATE_RUNNING       1
 #define EP_STATE_HALTED                2
index 98ada1a..bae8889 100644 (file)
@@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf)
 
        usb_set_intfdata(intf, NULL);
        dev_dbg(&intf->dev, "disconnect\n");
+       kfree(dev->buf);
        kfree(dev);
 }
 
index cffe2ac..03a3337 100644 (file)
@@ -1199,11 +1199,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, tegra_phy);
 
-       err = usb_add_phy_dev(&tegra_phy->u_phy);
-       if (err)
-               return err;
-
-       return 0;
+       return usb_add_phy_dev(&tegra_phy->u_phy);
 }
 
 static int tegra_usb_phy_remove(struct platform_device *pdev)
index 01c6a48..ac9a81a 100644 (file)
@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
        return info->dma_map_ctrl(chan->device->dev, pkt, map);
 }
 
-static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_complete(void *arg,
+                               const struct dmaengine_result *result);
 static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
        struct dma_chan *chan;
        struct device *dev = usbhs_priv_to_dev(priv);
        enum dma_transfer_direction dir;
+       dma_cookie_t cookie;
 
        fifo = usbhs_pipe_to_fifo(pipe);
        if (!fifo)
@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
        if (!desc)
                return;
 
-       desc->callback          = usbhsf_dma_complete;
-       desc->callback_param    = pipe;
+       desc->callback_result   = usbhsf_dma_complete;
+       desc->callback_param    = pkt;
 
-       pkt->cookie = dmaengine_submit(desc);
-       if (pkt->cookie < 0) {
+       cookie = dmaengine_submit(desc);
+       if (cookie < 0) {
                dev_err(dev, "Failed to submit dma descriptor\n");
                return;
        }
@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
                                           struct dma_chan *chan, int dtln)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
-       struct dma_tx_state state;
        size_t received_size;
        int maxp = usbhs_pipe_get_maxpacket(pipe);
 
-       dmaengine_tx_status(chan, pkt->cookie, &state);
-       received_size = pkt->length - state.residue;
+       received_size = pkt->length - pkt->dma_result->residue;
 
        if (dtln) {
                received_size -= USBHS_USB_DMAC_XFER_SIZE;
@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
        return 0;
 }
 
-static void usbhsf_dma_complete(void *arg)
+static void usbhsf_dma_complete(void *arg,
+                               const struct dmaengine_result *result)
 {
-       struct usbhs_pipe *pipe = arg;
+       struct usbhs_pkt *pkt = arg;
+       struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        int ret;
 
+       pkt->dma_result = result;
        ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
        if (ret < 0)
                dev_err(dev, "dma_complete run_error %d : %d\n",
index 7d3700b..039a2b9 100644 (file)
@@ -50,7 +50,7 @@ struct usbhs_pkt {
                     struct usbhs_pkt *pkt);
        struct work_struct work;
        dma_addr_t dma;
-       dma_cookie_t cookie;
+       const struct dmaengine_result *dma_result;
        void *buf;
        int length;
        int trans;
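
The renesas_usbhs hunks switch the DMA completion path from the plain callback to callback_result, which hands the handler a struct dmaengine_result containing the transfer residue; the driver then no longer has to remember the cookie and query dmaengine_tx_status() afterwards. A minimal sketch of the wiring, with an invented packet structure:

#include <linux/dmaengine.h>

struct demo_pkt {
        size_t length;
        size_t received;
};

/* Sketch: completion handler that reads the residue straight from the result. */
static void demo_dma_complete(void *param, const struct dmaengine_result *result)
{
        struct demo_pkt *pkt = param;

        if (result->result != DMA_TRANS_NOERROR)
                return;

        pkt->received = pkt->length - result->residue;
}

static int demo_submit(struct dma_async_tx_descriptor *desc, struct demo_pkt *pkt)
{
        dma_cookie_t cookie;

        desc->callback_result = demo_dma_complete;
        desc->callback_param  = pkt;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        /* A real driver would follow up with dma_async_issue_pending(chan). */
        return 0;
}
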
index 89675ee..8fbaef5 100644 (file)
@@ -77,6 +77,7 @@
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x4348, 0x5523) },
+       { USB_DEVICE(0x1a86, 0x7522) },
        { USB_DEVICE(0x1a86, 0x7523) },
        { USB_DEVICE(0x1a86, 0x5523) },
        { },
index 216edd5..ecda821 100644 (file)
@@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = {
 
 static const struct usb_device_id id_table_cyphidcomrs232[] = {
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+       { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
        { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { }                                             /* Terminating entry */
@@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+       { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
        { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
index 35e2237..16b7410 100644 (file)
@@ -25,6 +25,9 @@
 #define VENDOR_ID_CYPRESS              0x04b4
 #define PRODUCT_ID_CYPHIDCOM           0x5500
 
+/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */
+#define VENDOR_ID_SAI                  0x17dd
+
 /* FRWD Dongle - a GPS sports watch */
 #define VENDOR_ID_FRWD                 0x6737
 #define PRODUCT_ID_CYPHIDCOM_FRWD      0x0001
index d5bff69..b8dfeb4 100644 (file)
@@ -697,14 +697,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
        struct iuu_private *priv = usb_get_serial_port_data(port);
        unsigned long flags;
 
-       if (count > 256)
-               return -ENOMEM;
-
        spin_lock_irqsave(&priv->lock, flags);
 
+       count = min(count, 256 - priv->writelen);
+       if (count == 0)
+               goto out;
+
        /* fill the buffer */
        memcpy(priv->writebuf + priv->writelen, buf, count);
        priv->writelen += count;
+out:
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return count;
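
The iuu_phoenix hunk stops failing large writes outright and instead clamps the count to the space left in the staging buffer, returning the short count so the caller can retry with the remainder. A generic sketch of that clamp-and-report pattern; names and the fixed capacity are invented.

#include <linux/kernel.h>
#include <linux/string.h>

/* Sketch: accept only what fits and report the short count back. */
static int demo_buf_write(char *ring, int *len, int cap, const char *buf, int count)
{
        count = min(count, cap - *len);
        if (count) {
                memcpy(ring + *len, buf, count);
                *len += count;
        }

        return count;   /* a short return means "queue the rest later" */
}
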
index 254a8bb..9b7cee9 100644 (file)
@@ -245,6 +245,7 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_EG95                   0x0195
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
@@ -1097,6 +1098,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
          .driver_info = RSVD(4) },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95),
+         .driver_info = RSVD(4) },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -2028,6 +2031,9 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) | RSVD(5) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 962bc69..70ddc9d 100644 (file)
@@ -148,7 +148,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
        msg[0] = PMC_USB_DP_HPD;
        msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
 
-       msg[1] = PMC_USB_DP_HPD_IRQ;
+       if (data->status & DP_STATUS_IRQ_HPD)
+               msg[1] = PMC_USB_DP_HPD_IRQ;
 
        if (data->status & DP_STATUS_HPD_STATE)
                msg[1] |= PMC_USB_DP_HPD_LVL;
@@ -161,6 +162,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
 {
        struct typec_displayport_data *data = state->data;
        struct altmode_req req = { };
+       int ret;
 
        if (data->status & DP_STATUS_IRQ_HPD)
                return pmc_usb_mux_dp_hpd(port, state);
@@ -181,7 +183,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
        if (data->status & DP_STATUS_HPD_STATE)
                req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
 
-       return pmc_usb_command(port, (void *)&req, sizeof(req));
+       ret = pmc_usb_command(port, (void *)&req, sizeof(req));
+       if (ret)
+               return ret;
+
+       if (data->status & DP_STATUS_HPD_STATE)
+               return pmc_usb_mux_dp_hpd(port, state);
+
+       return 0;
 }
 
 static int
index 0173890..b56a088 100644 (file)
@@ -179,26 +179,6 @@ out:
        return tcpci_irq(chip->tcpci);
 }
 
-static int rt1711h_init_alert(struct rt1711h_chip *chip,
-                             struct i2c_client *client)
-{
-       int ret;
-
-       /* Disable chip interrupts before requesting irq */
-       ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
-       if (ret < 0)
-               return ret;
-
-       ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
-                                       rt1711h_irq,
-                                       IRQF_ONESHOT | IRQF_TRIGGER_LOW,
-                                       dev_name(chip->dev), chip);
-       if (ret < 0)
-               return ret;
-       enable_irq_wake(client->irq);
-       return 0;
-}
-
 static int rt1711h_sw_reset(struct rt1711h_chip *chip)
 {
        int ret;
@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       ret = rt1711h_init_alert(chip, client);
+       /* Disable chip interrupts before requesting irq */
+       ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
        if (ret < 0)
                return ret;
 
@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
        if (IS_ERR_OR_NULL(chip->tcpci))
                return PTR_ERR(chip->tcpci);
 
+       ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+                                       rt1711h_irq,
+                                       IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+                                       dev_name(chip->dev), chip);
+       if (ret < 0)
+               return ret;
+       enable_irq_wake(client->irq);
+
        return 0;
 }
 
index ff6562f..de211ef 100644 (file)
@@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d)
  * @config: the bus operations that is supported by this device
  * @size: size of the parent structure that contains private data
  *
- * Drvier should use vdap_alloc_device() wrapper macro instead of
+ * Driver should use vdpa_alloc_device() wrapper macro instead of
  * using this directly.
  *
  * Returns an error when parent/config/dma_dev is not set or fail to get
index 7c07790..f634c81 100644 (file)
@@ -521,10 +521,14 @@ static void vfio_pci_release(void *device_data)
                vfio_pci_vf_token_user_add(vdev, -1);
                vfio_spapr_pci_eeh_release(vdev->pdev);
                vfio_pci_disable(vdev);
-               if (vdev->err_trigger)
+               if (vdev->err_trigger) {
                        eventfd_ctx_put(vdev->err_trigger);
-               if (vdev->req_trigger)
+                       vdev->err_trigger = NULL;
+               }
+               if (vdev->req_trigger) {
                        eventfd_ctx_put(vdev->req_trigger);
+                       vdev->req_trigger = NULL;
+               }
        }
 
        mutex_unlock(&vdev->reflck->lock);
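
The vfio-pci release hunk clears err_trigger/req_trigger after eventfd_ctx_put() so a later open/release cycle cannot put the same stale context a second time. The pattern in isolation (sketch, names invented):

#include <linux/eventfd.h>

/* Sketch: drop a trigger exactly once, even if teardown runs again. */
static void demo_drop_trigger(struct eventfd_ctx **trigger)
{
        if (*trigger) {
                eventfd_ctx_put(*trigger);
                *trigger = NULL;        /* guards against a second put on a stale pointer */
        }
}
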
index 8746c94..d98843f 100644 (file)
@@ -398,9 +398,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
 /* Caller should hold memory_lock semaphore */
 bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
 {
+       struct pci_dev *pdev = vdev->pdev;
        u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
 
-       return cmd & PCI_COMMAND_MEMORY;
+       /*
+        * SR-IOV VF memory enable is handled by the MSE bit in the
+        * PF SR-IOV capability, there's therefore no need to trigger
+        * faults based on the virtual value.
+        */
+       return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
 }
 
 /*
@@ -1728,6 +1734,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
                                 vconfig[PCI_INTERRUPT_PIN]);
 
                vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+               /*
+                * VFs do no implement the memory enable bit of the COMMAND
+                * register therefore we'll not have it set in our initial
+                * copy of config space after pci_enable_device().  For
+                * consistency with PFs, set the virtual enable bit here.
+                */
+               *(__le16 *)&vconfig[PCI_COMMAND] |=
+                                       cpu_to_le16(PCI_COMMAND_MEMORY);
        }
 
        if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
index 0466921..a09dedc 100644 (file)
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
        return 0;
 }
 
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+       static void *backend;
+
+       const bool enable = fd != -1;
+       struct vhost_virtqueue *vq;
+       int r;
+
+       mutex_lock(&n->dev.mutex);
+       r = vhost_dev_check_owner(&n->dev);
+       if (r)
+               goto err;
+
+       if (index >= VHOST_TEST_VQ_MAX) {
+               r = -ENOBUFS;
+               goto err;
+       }
+       vq = &n->vqs[index];
+       mutex_lock(&vq->mutex);
+
+       /* Verify that ring has been setup correctly. */
+       if (!vhost_vq_access_ok(vq)) {
+               r = -EFAULT;
+               goto err_vq;
+       }
+       if (!enable) {
+               vhost_poll_stop(&vq->poll);
+               backend = vhost_vq_get_backend(vq);
+               vhost_vq_set_backend(vq, NULL);
+       } else {
+               vhost_vq_set_backend(vq, backend);
+               r = vhost_vq_init_access(vq);
+               if (r == 0)
+                       r = vhost_poll_start(&vq->poll, vq->kick);
+       }
+
+       mutex_unlock(&vq->mutex);
+
+       if (enable) {
+               vhost_test_flush_vq(n, index);
+       }
+
+       mutex_unlock(&n->dev.mutex);
+       return 0;
+
+err_vq:
+       mutex_unlock(&vq->mutex);
+err:
+       mutex_unlock(&n->dev.mutex);
+       return r;
+}
+
 static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
 {
+       struct vhost_vring_file backend;
        struct vhost_test *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                if (copy_from_user(&test, argp, sizeof test))
                        return -EFAULT;
                return vhost_test_run(n, test);
+       case VHOST_TEST_SET_BACKEND:
+               if (copy_from_user(&backend, argp, sizeof backend))
+                       return -EFAULT;
+               return vhost_test_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
index 7dd265b..822bc4b 100644 (file)
@@ -4,5 +4,6 @@
 
 /* Start a given test on the virtio null device. 0 stops all tests. */
 #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
 
 #endif
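
The new VHOST_TEST_SET_BACKEND ioctl above takes the same struct vhost_vring_file other vhost devices use (a vring index plus an fd, where fd == -1 detaches), even though the _IOW() size argument is written as int. The define lives in the kernel's own test header rather than the exported UAPI headers, so a user program repeats it locally. A hypothetical userspace call could look like this; the device path is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)

int main(void)
{
        struct vhost_vring_file backend = {
                .index = 0,     /* the test device's only vring */
                .fd = -1,       /* -1 detaches; any other fd (re)attaches */
        };
        int fd = open("/dev/vhost-test", O_RDWR);       /* illustrative path */

        if (fd < 0)
                return 1;

        if (ioctl(fd, VHOST_TEST_SET_BACKEND, &backend))
                perror("VHOST_TEST_SET_BACKEND");

        return 0;
}
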
index 7580e34..a54b60d 100644 (file)
@@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
-       int index = vma->vm_pgoff;
+       unsigned long index = vma->vm_pgoff;
 
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
index 6af2734..af9f5ab 100644 (file)
@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
                ops->graphics = 1;
 
                if (!blank) {
-                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+                               FB_ACTIVATE_KD_TEXT;
                        fb_set_var(info, &var);
                        ops->graphics = 0;
                        ops->var = info->var;
index bee29aa..def14ac 100644 (file)
@@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options)
                else if (!strcmp(this_opt, "noedid"))
                        noedid = true;
                else if (!strcmp(this_opt, "noblank"))
-                       blank = true;
+                       blank = false;
                else if (!strncmp(this_opt, "vtotal:", 7))
                        vram_total = simple_strtoul(this_opt + 7, NULL, 0);
                else if (!strncmp(this_opt, "vremap:", 7))
index b690a8a..18ebd7a 100644 (file)
@@ -1444,7 +1444,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
        or_mask = caps->u.in.or_mask;
        not_mask = caps->u.in.not_mask;
 
-       if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+       if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
                return -EINVAL;
 
        ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
@@ -1520,7 +1520,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 
        /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
        if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
-           req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
+           req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+           req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
                return vbg_ioctl_vmmrequest(gdev, session, data);
 
        if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
@@ -1558,6 +1559,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
        case VBG_IOCTL_HGCM_CALL(0):
                return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
        case VBG_IOCTL_LOG(0):
+       case VBG_IOCTL_LOG_ALT(0):
                return vbg_ioctl_log(data);
        }
 
index 4188c12..77c3a9c 100644 (file)
 #include <linux/vboxguest.h>
 #include "vmmdev.h"
 
+/*
+ * The mainline kernel version (this version) of the vboxguest module
+ * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and
+ * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead
+ * of _IO(V, ...) as the out of tree VirtualBox upstream version does.
+ *
+ * These _ALT definitions keep compatibility with the wrong defines the
+ * mainline kernel version used for a while.
+ * Note the VirtualBox userspace bits have always been built against
+ * VirtualBox upstream's headers, so this is likely not necessary. But
+ * we must never break our ABI so we keep these around to be 100% sure.
+ */
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_LOG_ALT(s)             _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+
 struct vbg_session;
 
 /** VBox guest memory balloon. */
index 6e8c0f1..32c2c52 100644 (file)
@@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
         * the need for a bounce-buffer and another copy later on.
         */
        is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
-                        req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
+                        req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+                        req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
 
        if (is_vmmdev_req)
                buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
index 6337b8d..21f4081 100644 (file)
@@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
  * not.
  */
 #define VMMDEV_GUEST_SUPPORTS_GRAPHICS                      BIT(2)
+/* The mask of valid capabilities, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK                      0x00000007U
 
 /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
 struct vmmdev_hypervisorinfo {
index 50c689f..f26f5f6 100644 (file)
@@ -101,6 +101,11 @@ struct virtio_mem {
 
        /* The parent resource for all memory added via this device. */
        struct resource *parent_resource;
+       /*
+        * Copy of "System RAM (virtio_mem)" to be used for
+        * add_memory_driver_managed().
+        */
+       const char *resource_name;
 
        /* Summary of all memory block states. */
        unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
@@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);
 
+       /*
+        * When force-unloading the driver and we still have memory added to
+        * Linux, the resource name has to stay.
+        */
+       if (!vm->resource_name) {
+               vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
+                                                 GFP_KERNEL);
+               if (!vm->resource_name)
+                       return -ENOMEM;
+       }
+
        dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
-       return add_memory(nid, addr, memory_block_size_bytes());
+       return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
+                                        vm->resource_name);
 }
 
 /*
@@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
                                                VIRTIO_MEM_MB_STATE_OFFLINE);
        }
 
-       return rc;
+       return 0;
 }
 
 /*
@@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
-           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
                dev_warn(&vdev->dev, "device still has system memory added\n");
-       else
+       } else {
                virtio_mem_delete_resource(vm);
+               kfree_const(vm->resource_name);
+       }
 
        /* remove all tracking data - no locking needed */
        vfree(vm->mb_state);
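
The virtio_mem hunks hand add_memory_driver_managed() a driver-owned resource name, duplicated with kstrdup_const() so the string can outlive the device when memory is still plugged on force-unload, and released with kfree_const() only once nothing remains. A tiny sketch of that const-aware pair; the name and helpers are invented.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Sketch: kstrdup_const() hands back the literal itself when it sits in
 * kernel .rodata and a real copy otherwise; kfree_const() only frees the
 * real copies.
 */
static const char *demo_name;

static int demo_set_name(void)
{
        demo_name = kstrdup_const("System RAM (demo)", GFP_KERNEL);
        return demo_name ? 0 : -ENOMEM;
}

static void demo_clear_name(void)
{
        kfree_const(demo_name);
        demo_name = NULL;
}
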
index 040d2a4..786fbb7 100644 (file)
@@ -69,11 +69,27 @@ struct xenbus_map_node {
        unsigned int   nr_handles;
 };
 
+struct map_ring_valloc {
+       struct xenbus_map_node *node;
+
+       /* Why do we need two arrays? See comment of __xenbus_map_ring */
+       union {
+               unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+               pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       };
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+       unsigned int idx;       /* HVM only. */
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-       int (*map)(struct xenbus_device *dev,
+       int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address.  Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
 {
        int err;
+       struct map_ring_valloc *info;
+
+       *vaddr = NULL;
+
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+               return -EINVAL;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
 
-       err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
-       /* Some hypervisors are buggy and can return 1. */
-       if (err > 0)
-               err = GNTST_general_error;
+       info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+       if (!info->node)
+               err = -ENOMEM;
+       else
+               err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
+       kfree(info->node);
+       kfree(info);
        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
-                            phys_addr_t *addrs,
+                            struct map_ring_valloc *info,
                             unsigned int flags,
                             bool *leaked)
 {
-       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
-       int err = GNTST_okay;
 
        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_grefs; i++) {
-               memset(&map[i], 0, sizeof(map[i]));
-               gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-                                 dev->otherend_id);
+               gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+                                 gnt_refs[i], dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }
 
-       gnttab_batch_map(map, i);
+       gnttab_batch_map(info->map, i);
 
        for (i = 0; i < nr_grefs; i++) {
-               if (map[i].status != GNTST_okay) {
-                       err = map[i].status;
-                       xenbus_dev_fatal(dev, map[i].status,
+               if (info->map[i].status != GNTST_okay) {
+                       xenbus_dev_fatal(dev, info->map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else
-                       handles[i] = map[i].handle;
+                       handles[i] = info->map[i].handle;
        }
 
-       return GNTST_okay;
+       return 0;
 
  fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
-                       memset(&unmap[j], 0, sizeof(unmap[j]));
-                       gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+                       gnttab_set_unmap_op(&info->unmap[j],
+                                           info->phys_addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
                BUG();
 
        *leaked = false;
        for (i = 0; i < j; i++) {
-               if (unmap[i].status != GNTST_okay) {
+               if (info->unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }
 
-       return err;
+       return -ENOENT;
 }
 
 /**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
        return err;
 }
 
-struct map_ring_valloc_hvm
-{
-       unsigned int idx;
-
-       /* Why do we need two arrays? See comment of __xenbus_map_ring */
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-       unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
 {
-       struct map_ring_valloc_hvm *info = data;
+       struct map_ring_valloc *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
 
        info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
        info->idx++;
 }
 
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-                                     grant_ref_t *gnt_ref,
-                                     unsigned int nr_grefs,
-                                     void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+                              struct map_ring_valloc *info,
+                              grant_ref_t *gnt_ref,
+                              unsigned int nr_grefs,
+                              void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        int err;
        void *addr;
        bool leaked = false;
-       struct map_ring_valloc_hvm info = {
-               .idx = 0,
-       };
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       *vaddr = NULL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
        err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;
 
        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
-                            &info);
+                            info);
 
        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
-                               info.phys_addrs, GNTMAP_host_map, &leaked);
+                               info, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;
 
        if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = addr;
+       info->node = NULL;
+
        return 0;
 
  out_xenbus_unmap_ring:
        if (!leaked)
-               xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+               xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
        else
                pr_alert("leaking %p size %u page(s)",
                         addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        if (!leaked)
                free_xenballooned_pages(nr_pages, node->hvm.pages);
  out_err:
-       kfree(node);
        return err;
 }
 
@@ -676,40 +680,28 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
 #ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                    grant_ref_t *gnt_refs,
-                                    unsigned int nr_grefs,
-                                    void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+                             struct map_ring_valloc *info,
+                             grant_ref_t *gnt_refs,
+                             unsigned int nr_grefs,
+                             void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;
 
-       *vaddr = NULL;
-
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
+       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+       if (!area)
                return -ENOMEM;
 
-       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-       if (!area) {
-               kfree(node);
-               return -ENOMEM;
-       }
-
        for (i = 0; i < nr_grefs; i++)
-               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+               info->phys_addrs[i] =
+                       arbitrary_virt_to_machine(info->ptes[i]).maddr;
 
        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                               phys_addrs,
-                               GNTMAP_host_map | GNTMAP_contains_pte,
+                               info, GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;
@@ -722,6 +714,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = area->addr;
+       info->node = NULL;
+
        return 0;
 
 failed:
@@ -730,11 +724,10 @@ failed:
        else
                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
 
-       kfree(node);
        return err;
 }
 
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +791,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 }
 
 static const struct xenbus_ring_ops ring_ops_pv = {
-       .map = xenbus_map_ring_valloc_pv,
-       .unmap = xenbus_unmap_ring_vfree_pv,
+       .map = xenbus_map_ring_pv,
+       .unmap = xenbus_unmap_ring_pv,
 };
 #endif
 
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
 {
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +807,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int len,
                                              void *data)
 {
-       struct unmap_ring_vfree_hvm *info = data;
+       struct unmap_ring_hvm *info = data;
 
        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
 
        info->idx++;
 }
 
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 {
        int rv;
        struct xenbus_map_node *node;
        void *addr;
-       struct unmap_ring_vfree_hvm info = {
+       struct unmap_ring_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;
@@ -887,8 +880,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
 static const struct xenbus_ring_ops ring_ops_hvm = {
-       .map = xenbus_map_ring_valloc_hvm,
-       .unmap = xenbus_unmap_ring_vfree_hvm,
+       .map = xenbus_map_ring_hvm,
+       .unmap = xenbus_unmap_ring_hvm,
 };
 
 void __init xenbus_ring_ops_init(void)
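With the above conversion, xenbus_map_ring_valloc() reports failures only as negative errno values (-EINVAL for too many grants, -ENOMEM on allocation failure, -ENOENT when the grant mapping itself fails), never as raw GNTST_* codes. A minimal caller sketch under that assumption (the helper below is illustrative only, not part of this series):

	static int example_map_ring(struct xenbus_device *dev, grant_ref_t *grefs,
				    unsigned int nr_grefs, void **ring)
	{
		/* returns 0 on success or a negative errno on failure */
		int err = xenbus_map_ring_valloc(dev, grefs, nr_grefs, ring);

		if (err)
			return err;
		/* ... use *ring; tear down with xenbus_unmap_ring_vfree(dev, *ring) ... */
		return 0;
	}
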
index 005921e..5b79cdc 100644 (file)
@@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                return ERR_PTR(-ENOMEM);
        }
 
+       cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+       if (!cell->name) {
+               kfree(cell);
+               return ERR_PTR(-ENOMEM);
+       }
+
        cell->net = net;
        cell->name_len = namelen;
        for (i = 0; i < namelen; i++)
                cell->name[i] = tolower(name[i]);
+       cell->name[i] = 0;
 
        atomic_set(&cell->usage, 2);
        INIT_WORK(&cell->manager, afs_manage_cell);
@@ -207,6 +214,7 @@ parse_failed:
        if (ret == -EINVAL)
                printk(KERN_ERR "kAFS: bad VL server IP address\n");
 error:
+       kfree(cell->name);
        kfree(cell);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
@@ -489,6 +497,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
        afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        afs_put_cell(cell->net, cell->alias_of);
        key_put(cell->anonymous_key);
+       kfree(cell->name);
        kfree(cell);
 
        _leave(" [destroyed]");
index c264839..24fd163 100644 (file)
@@ -71,7 +71,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
                swap(vnode, vnode2);
 
        if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
-               op->error = -EINTR;
+               op->error = -ERESTARTSYS;
                op->flags |= AFS_OPERATION_STOP;
                _leave(" = f [I 0]");
                return false;
@@ -80,7 +80,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
 
        if (vnode2) {
                if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
-                       op->error = -EINTR;
+                       op->error = -ERESTARTSYS;
                        op->flags |= AFS_OPERATION_STOP;
                        mutex_unlock(&vnode->io_lock);
                        op->flags &= ~AFS_OPERATION_LOCK_0;
index d520535..792ac71 100644 (file)
@@ -388,7 +388,7 @@ struct afs_cell {
        struct afs_vlserver_list __rcu *vl_servers;
 
        u8                      name_len;       /* Length of name */
-       char                    name[64 + 1];   /* Cell name, case-flattened and NUL-padded */
+       char                    *name;          /* Cell name, case-flattened and NUL-padded */
 };
 
 /*
index 7437806..a121c24 100644 (file)
@@ -449,6 +449,7 @@ static int afs_store_data(struct address_space *mapping,
        op->store.first_offset = offset;
        op->store.last_to = to;
        op->mtime = vnode->vfs_inode.i_mtime;
+       op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;
 
 try_next_key:
index b04c528..74c886f 100644 (file)
@@ -53,7 +53,7 @@ static int autofs_write(struct autofs_sb_info *sbi,
 
        mutex_lock(&sbi->pipe_mutex);
        while (bytes) {
-               wr = __kernel_write(file, data, bytes, &file->f_pos);
+               wr = kernel_write(file, data, bytes, &file->f_pos);
                if (wr <= 0)
                        break;
                data += wr;
index 176e8a2..c037ef5 100644 (file)
@@ -940,7 +940,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto out_put_group;
+               goto out;
        }
 
        /*
@@ -978,7 +978,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
-                       goto out_put_group;
+                       goto out;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
@@ -1001,13 +1001,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
-               goto out_put_group;
+               goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
-                       goto out_put_group;
+                       goto out;
                btrfs_release_path(path);
        }
 
@@ -1016,6 +1016,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                 &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);
 
+       /* Once for the block groups rbtree */
+       btrfs_put_block_group(block_group);
+
        if (fs_info->first_logical_byte == block_group->start)
                fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&fs_info->block_group_cache_lock);
@@ -1089,6 +1092,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        spin_unlock(&block_group->space_info->lock);
 
+       /*
+        * Remove the free space for the block group from the free space tree
+        * and the block group's item from the extent tree before marking the
+        * block group as removed. This is to prevent races with tasks that
+        * freeze and unfreeze a block group, this task and another task
+        * allocating a new block group - the unfreeze task ends up removing
+        * the block group's extent map before the task calling this function
+        * deletes the block group item from the extent tree, allowing for
+        * another task to attempt to create another block group with the same
+        * item key (and failing with -EEXIST and a transaction abort).
+        */
+       ret = remove_block_group_free_space(trans, block_group);
+       if (ret)
+               goto out;
+
+       ret = remove_block_group_item(trans, path, block_group);
+       if (ret < 0)
+               goto out;
+
        mutex_lock(&fs_info->chunk_mutex);
        spin_lock(&block_group->lock);
        block_group->removed = 1;
@@ -1123,17 +1145,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        mutex_unlock(&fs_info->chunk_mutex);
 
-       ret = remove_block_group_free_space(trans, block_group);
-       if (ret)
-               goto out_put_group;
-
-       /* Once for the block groups rbtree */
-       btrfs_put_block_group(block_group);
-
-       ret = remove_block_group_item(trans, path, block_group);
-       if (ret < 0)
-               goto out;
-
        if (remove_em) {
                struct extent_map_tree *em_tree;
 
@@ -1145,10 +1156,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                free_extent_map(em);
        }
 
-out_put_group:
+out:
        /* Once for the lookup reference */
        btrfs_put_block_group(block_group);
-out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
        btrfs_free_path(path);
index 3a7648b..82ab6e5 100644 (file)
@@ -1196,7 +1196,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                switch (tm->op) {
                case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
                        BUG_ON(tm->slot < n);
-                       /* Fallthrough */
+                       fallthrough;
                case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
                case MOD_LOG_KEY_REMOVE:
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
index 30ce703..d404cce 100644 (file)
@@ -1009,6 +1009,8 @@ enum {
        BTRFS_ROOT_DEAD_RELOC_TREE,
        /* Mark dead root stored on device whose cleanup needs to be resumed */
        BTRFS_ROOT_DEAD_TREE,
+       /* The root has a log tree. Used only for subvolume roots. */
+       BTRFS_ROOT_HAS_LOG_TREE,
 };
 
 /*
index 5615320..741c7e1 100644 (file)
@@ -619,6 +619,7 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
        list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
                                 bg_list) {
                list_del_init(&block_group->bg_list);
+               btrfs_put_block_group(block_group);
                btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
index 7c6f0bb..b1a1480 100644 (file)
@@ -2593,10 +2593,12 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
                    !extent_buffer_uptodate(tree_root->node)) {
                        handle_error = true;
 
-                       if (IS_ERR(tree_root->node))
+                       if (IS_ERR(tree_root->node)) {
                                ret = PTR_ERR(tree_root->node);
-                       else if (!extent_buffer_uptodate(tree_root->node))
+                               tree_root->node = NULL;
+                       } else if (!extent_buffer_uptodate(tree_root->node)) {
                                ret = -EUCLEAN;
+                       }
 
                        btrfs_warn(fs_info, "failed to read tree root");
                        continue;
index 68c9605..608f934 100644 (file)
@@ -5058,25 +5058,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
        int refs;
-       /* the ref bit is tricky.  We have to make sure it is set
-        * if we have the buffer dirty.   Otherwise the
-        * code to free a buffer can end up dropping a dirty
-        * page
+       /*
+        * The TREE_REF bit is first set when the extent_buffer is added
+        * to the radix tree. It is also reset, if unset, when a new reference
+        * is created by find_extent_buffer.
         *
-        * Once the ref bit is set, it won't go away while the
-        * buffer is dirty or in writeback, and it also won't
-        * go away while we have the reference count on the
-        * eb bumped.
+        * It is only cleared in two cases: freeing the last non-tree
+        * reference to the extent_buffer when its STALE bit is set or
+        * calling releasepage when the tree reference is the only reference.
         *
-        * We can't just set the ref bit without bumping the
-        * ref on the eb because free_extent_buffer might
-        * see the ref bit and try to clear it.  If this happens
-        * free_extent_buffer might end up dropping our original
-        * ref by mistake and freeing the page before we are able
-        * to add one more ref.
+        * In both cases, care is taken to ensure that the extent_buffer's
+        * pages are not under io. However, releasepage can be concurrently
+        * called with creating new references, which is prone to race
+        * conditions between the calls to check_buffer_tree_ref in those
+        * codepaths and clearing TREE_REF in try_release_extent_buffer.
         *
-        * So bump the ref count first, then set the bit.  If someone
-        * beat us to it, drop the ref we added.
+        * The actual lifetime of the extent_buffer in the radix tree is
+        * adequately protected by the refcount, but the TREE_REF bit and
+        * its corresponding reference are not. To protect against this
+        * class of races, we call check_buffer_tree_ref from the codepaths
+        * which trigger io after they set eb->io_pages. Note that once io is
+        * initiated, TREE_REF can no longer be cleared, so that is the
+        * moment at which any such race is best fixed.
         */
        refs = atomic_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5527,6 +5530,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
        clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
        eb->read_mirror = 0;
        atomic_set(&eb->io_pages, num_reads);
+       /*
+        * It is possible for releasepage to clear the TREE_REF bit before we
+        * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+        */
+       check_buffer_tree_ref(eb);
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
 
index 2c14312..b0d2c97 100644 (file)
@@ -1533,7 +1533,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 }
 
 static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
-                                   size_t *write_bytes)
+                                   size_t *write_bytes, bool nowait)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_root *root = inode->root;
@@ -1541,27 +1541,43 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
        u64 num_bytes;
        int ret;
 
-       if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
+       if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
                return -EAGAIN;
 
        lockstart = round_down(pos, fs_info->sectorsize);
        lockend = round_up(pos + *write_bytes,
                           fs_info->sectorsize) - 1;
+       num_bytes = lockend - lockstart + 1;
 
-       btrfs_lock_and_flush_ordered_range(inode, lockstart,
-                                          lockend, NULL);
+       if (nowait) {
+               struct btrfs_ordered_extent *ordered;
+
+               if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
+                       return -EAGAIN;
+
+               ordered = btrfs_lookup_ordered_range(inode, lockstart,
+                                                    num_bytes);
+               if (ordered) {
+                       btrfs_put_ordered_extent(ordered);
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
+       } else {
+               btrfs_lock_and_flush_ordered_range(inode, lockstart,
+                                                  lockend, NULL);
+       }
 
-       num_bytes = lockend - lockstart + 1;
        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
                        NULL, NULL, NULL);
        if (ret <= 0) {
                ret = 0;
-               btrfs_drew_write_unlock(&root->snapshot_lock);
+               if (!nowait)
+                       btrfs_drew_write_unlock(&root->snapshot_lock);
        } else {
                *write_bytes = min_t(size_t, *write_bytes ,
                                     num_bytes - pos + lockstart);
        }
-
+out_unlock:
        unlock_extent(&inode->io_tree, lockstart, lockend);
 
        return ret;
@@ -1633,7 +1649,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                                      BTRFS_INODE_PREALLOC)) &&
                            check_can_nocow(BTRFS_I(inode), pos,
-                                       &write_bytes) > 0) {
+                                           &write_bytes, false) > 0) {
                                /*
                                 * For nodata cow case, no need to reserve
                                 * data space.
@@ -1904,13 +1920,25 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        if (iocb->ki_flags & IOCB_NOWAIT) {
+               size_t nocow_bytes = count;
+
                /*
                 * We will allocate space in case nodatacow is not set,
                 * so bail
                 */
                if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                              BTRFS_INODE_PREALLOC)) ||
-                   check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+                   check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
+                                   true) <= 0) {
+                       inode_unlock(inode);
+                       return -EAGAIN;
+               }
+               /*
+                * There are holes in the range or parts of the range that must
+                * be COWed (shared extents, RO block groups, etc), so just bail
+                * out.
+                */
+               if (nocow_bytes < count) {
                        inode_unlock(inode);
                        return -EAGAIN;
                }
@@ -3481,6 +3509,7 @@ const struct file_operations btrfs_file_operations = {
        .read_iter      = generic_file_read_iter,
        .splice_read    = generic_file_splice_read,
        .write_iter     = btrfs_file_write_iter,
+       .splice_write   = iter_file_splice_write,
        .mmap           = btrfs_file_mmap,
        .open           = btrfs_file_open,
        .release        = btrfs_release_file,
index d04c82c..43c803c 100644 (file)
@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
        u64 num_bytes;
        unsigned long ram_size;
        u64 cur_alloc_size = 0;
+       u64 min_alloc_size;
        u64 blocksize = fs_info->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
        btrfs_drop_extent_cache(BTRFS_I(inode), start,
                        start + num_bytes - 1, 0);
 
+       /*
+        * Relocation relies on the relocated extents to have exactly the same
+        * size as the original extents. Normally writeback for relocation data
+        * extents follows a NOCOW path because relocation preallocates the
+        * extents. However, due to an operation such as scrub turning a block
+        * group to RO mode, it may fall back to COW mode, so we must make sure
+        * an extent allocated during COW has exactly the requested size and can
+        * not be split into smaller extents, otherwise relocation breaks and
+        * fails during the stage where it updates the bytenr of file extent
+        * items.
+        */
+       if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+               min_alloc_size = num_bytes;
+       else
+               min_alloc_size = fs_info->sectorsize;
+
        while (num_bytes > 0) {
                cur_alloc_size = num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-                                          fs_info->sectorsize, 0, alloc_hint,
+                                          min_alloc_size, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
                           int *page_started, unsigned long *nr_written)
 {
        const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+       const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+                                  BTRFS_DATA_RELOC_TREE_OBJECTID);
        const u64 range_bytes = end + 1 - start;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 range_start = start;
@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
         *    data space info, which we incremented in the step above.
         *
         * If we need to fallback to cow and the inode corresponds to a free
-        * space cache inode, we must also increment bytes_may_use of the data
-        * space_info for the same reason. Space caches always get a prealloc
+        * space cache inode or an inode of the data relocation tree, we must
+        * also increment bytes_may_use of the data space_info for the same
+        * reason. Space caches and relocated data extents always get a prealloc
         * extent for them, however scrub or balance may have set the block
-        * group that contains that extent to RO mode.
+        * group that contains that extent to RO mode and therefore force COW
+        * when starting writeback.
         */
        count = count_range_bits(io_tree, &range_start, end, range_bytes,
                                 EXTENT_NORESERVE, 0);
-       if (count > 0 || is_space_ino) {
-               const u64 bytes = is_space_ino ? range_bytes : count;
+       if (count > 0 || is_space_ino || is_reloc_ino) {
+               u64 bytes = count;
                struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
                struct btrfs_space_info *sinfo = fs_info->data_sinfo;
 
+               if (is_space_ino || is_reloc_ino)
+                       bytes = range_bytes;
+
                spin_lock(&sinfo->lock);
                btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
                spin_unlock(&sinfo->lock);
@@ -1666,12 +1690,8 @@ out_check:
                        ret = fallback_to_cow(inode, locked_page, cow_start,
                                              found_key.offset - 1,
                                              page_started, nr_written);
-                       if (ret) {
-                               if (nocow)
-                                       btrfs_dec_nocow_writers(fs_info,
-                                                               disk_bytenr);
+                       if (ret)
                                goto error;
-                       }
                        cow_start = (u64)-1;
                }
 
@@ -1687,9 +1707,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (nocow)
-                                       btrfs_dec_nocow_writers(fs_info,
-                                                               disk_bytenr);
                                ret = PTR_ERR(em);
                                goto error;
                        }
@@ -7865,9 +7882,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                        dio_data.overwrite = 1;
                        inode_unlock(inode);
                        relock = true;
-               } else if (iocb->ki_flags & IOCB_NOWAIT) {
-                       ret = -EAGAIN;
-                       goto out;
                }
                ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                                                   offset, count);
index 168deb8..e8f7c5f 100644 (file)
@@ -2692,7 +2692,7 @@ out:
        btrfs_put_root(root);
 out_free:
        btrfs_free_path(path);
-       kzfree(subvol_info);
+       kfree(subvol_info);
        return ret;
 }
 
index 7887317..af92525 100644 (file)
@@ -509,7 +509,7 @@ static int process_leaf(struct btrfs_root *root,
                switch (key.type) {
                case BTRFS_EXTENT_ITEM_KEY:
                        *num_bytes = key.offset;
-                       /* fall through */
+                       fallthrough;
                case BTRFS_METADATA_ITEM_KEY:
                        *bytenr = key.objectid;
                        ret = process_extent_item(fs_info, path, &key, i,
index 41ee886..c7bd3fd 100644 (file)
@@ -879,8 +879,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
+       remove_ticket(space_info, ticket);
        ticket->bytes = 0;
-       list_del_init(&ticket->list);
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
index bc73fd6..c3826ae 100644 (file)
@@ -523,7 +523,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_compress_force:
                case Opt_compress_force_type:
                        compress_force = true;
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_compress:
                case Opt_compress_type:
                        saved_compress_type = btrfs_test_opt(info,
@@ -622,7 +622,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        btrfs_set_opt(info->mount_opt, NOSSD);
                        btrfs_clear_and_info(info, SSD,
                                             "not using ssd optimizations");
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_nossd_spread:
                        btrfs_clear_and_info(info, SSD_SPREAD,
                                             "not using spread ssd allocation scheme");
@@ -793,7 +793,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_recovery:
                        btrfs_warn(info,
                                   "'recovery' is deprecated, use 'usebackuproot' instead");
-                       /* fall through */
+                       fallthrough;
                case Opt_usebackuproot:
                        btrfs_info(info,
                                   "trying to use backup root at mount time");
index 920cee3..cd5348f 100644 (file)
@@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
+               set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
                clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
                root->log_start_pid = current->pid;
        }
@@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root)
 {
        int ret = -ENOENT;
 
+       if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
+               return ret;
+
        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
@@ -3303,6 +3307,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
        if (root->log_root) {
                free_log_tree(trans, root->log_root);
                root->log_root = NULL;
+               clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
        }
        return 0;
 }
index f067b59..75af233 100644 (file)
@@ -408,7 +408,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
                return BTRFS_MAP_WRITE;
        default:
                WARN_ON_ONCE(1);
-               /* fall through */
+               fallthrough;
        case REQ_OP_READ:
                return BTRFS_MAP_READ;
        }
index e7726f5..3080cda 100644 (file)
@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
        }
 
        data = kmap(page);
-       ret = __kernel_write(file, data, len, &pos);
+       ret = kernel_write(file, data, len, &pos);
        kunmap(page);
        fput(file);
        if (ret != len)
index fc98b97..53588d7 100644 (file)
@@ -399,6 +399,10 @@ skip_rdma:
                        if (ses->sign)
                                seq_puts(m, " signed");
 
+                       seq_printf(m, "\n\tUser: %d Cred User: %d",
+                                  from_kuid(&init_user_ns, ses->linux_uid),
+                                  from_kuid(&init_user_ns, ses->cred_uid));
+
                        if (ses->chan_count > 1) {
                                seq_printf(m, "\n\n\tExtra Channels: %zu\n",
                                           ses->chan_count-1);
@@ -406,7 +410,7 @@ skip_rdma:
                                        cifs_dump_channel(m, j, &ses->chans[j]);
                        }
 
-                       seq_puts(m, "\n\tShares:");
+                       seq_puts(m, "\n\n\tShares:");
                        j = 0;
 
                        seq_printf(m, "\n\t%d) IPC: ", j);
index c7a311d..99b3180 100644 (file)
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.27"
+#define CIFS_VERSION   "2.28"
 #endif                         /* _CIFSFS_H */
index 5fac34f..a61abde 100644 (file)
@@ -5306,9 +5306,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
        vol_info->nocase = master_tcon->nocase;
        vol_info->nohandlecache = master_tcon->nohandlecache;
        vol_info->local_lease = master_tcon->local_lease;
+       vol_info->no_lease = master_tcon->no_lease;
+       vol_info->resilient = master_tcon->use_resilient;
+       vol_info->persistent = master_tcon->use_persistent;
+       vol_info->handle_timeout = master_tcon->handle_timeout;
        vol_info->no_linux_ext = !master_tcon->unix_ext;
+       vol_info->linux_ext = master_tcon->posix_extensions;
        vol_info->sectype = master_tcon->ses->sectype;
        vol_info->sign = master_tcon->ses->sign;
+       vol_info->seal = master_tcon->seal;
 
        rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
        if (rc) {
@@ -5334,10 +5340,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
                goto out;
        }
 
-       /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
-       if (tcon->posix_extensions)
-               cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
        if (cap_unix(ses))
                reset_cifs_unix_caps(0, tcon, NULL, vol_info);
 
index 4fe757c..be46fab 100644 (file)
@@ -1149,20 +1149,20 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 
 /*
  * Set the byte-range lock (posix style). Returns:
- * 1) 0, if we set the lock and don't need to request to the server;
- * 2) 1, if we need to request to the server;
- * 3) <0, if the error occurs while setting the lock.
+ * 1) <0, if an error occurs while setting the lock;
+ * 2) 0, if we set the lock and don't need to request to the server;
+ * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
+ * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
  */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
-       int rc = 1;
+       int rc = FILE_LOCK_DEFERRED + 1;
 
        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;
 
-try_again:
        cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
@@ -1171,13 +1171,6 @@ try_again:
 
        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
-       if (rc == FILE_LOCK_DEFERRED) {
-               rc = wait_event_interruptible(flock->fl_wait,
-                                       list_empty(&flock->fl_blocked_member));
-               if (!rc)
-                       goto try_again;
-               locks_delete_block(flock);
-       }
        return rc;
 }
 
@@ -1652,7 +1645,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                int posix_lock_type;
 
                rc = cifs_posix_lock_set(file, flock);
-               if (!rc || rc < 0)
+               if (rc <= FILE_LOCK_DEFERRED)
                        return rc;
 
                if (type & server->vals->shared_lock_type)
@@ -4336,7 +4329,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
 
                __SetPageLocked(page);
-               if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
+               rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
+               if (rc) {
                        __ClearPageLocked(page);
                        break;
                }
@@ -4352,6 +4346,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
 {
        int rc;
+       int err = 0;
        struct list_head tmplist;
        struct cifsFileInfo *open_file = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
@@ -4396,7 +4391,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
         * the order of declining indexes. When we put the pages in
         * the rdata->pages, then we want them in increasing order.
         */
-       while (!list_empty(page_list)) {
+       while (!list_empty(page_list) && !err) {
                unsigned int i, nr_pages, bytes, rsize;
                loff_t offset;
                struct page *page, *tpage;
@@ -4429,9 +4424,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                        return 0;
                }
 
-               rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+               nr_pages = 0;
+               err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
                                         &nr_pages, &offset, &bytes);
-               if (rc) {
+               if (!nr_pages) {
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }
index 583f5e4..49c3ea8 100644 (file)
@@ -2044,6 +2044,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       bool new_target = d_really_is_negative(target_dentry);
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2120,8 +2121,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
         */
 
 unlink_target:
-       /* Try unlinking the target dentry if it's not negative */
-       if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+       /*
+        * If the target dentry was created during the rename, try
+        * unlinking it if it's not negative
+        */
+       if (new_target &&
+           d_really_is_positive(target_dentry) &&
+           (rc == -EACCES || rc == -EEXIST)) {
                if (d_is_dir(target_dentry))
                        tmprc = cifs_rmdir(target_dir, target_dentry);
                else
@@ -2535,6 +2541,15 @@ set_size_out:
        if (rc == 0) {
                cifsInode->server_eof = attrs->ia_size;
                cifs_setsize(inode, attrs->ia_size);
+
+               /*
+                * The man page of truncate says if the size changed,
+                * then the st_ctime and st_mtime fields for the file
+                * are updated.
+                */
+               attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+               attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+
                cifs_truncate_page(inode->i_mapping, inode->i_size);
        }
 
index 4a73e63..dcde44f 100644 (file)
@@ -169,6 +169,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        unsigned int xid;
        struct cifsFileInfo *pSMBFile = filep->private_data;
        struct cifs_tcon *tcon;
+       struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb;
        __u64   ExtAttrBits = 0;
        __u64   caps;
@@ -307,13 +308,19 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                break;
                        }
                        cifs_sb = CIFS_SB(inode->i_sb);
-                       tcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+                       tlink = cifs_sb_tlink(cifs_sb);
+                       if (IS_ERR(tlink)) {
+                               rc = PTR_ERR(tlink);
+                               break;
+                       }
+                       tcon = tlink_tcon(tlink);
                        if (tcon && tcon->ses->server->ops->notify) {
                                rc = tcon->ses->server->ops->notify(xid,
                                                filep, (void __user *)arg);
                                cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
                        } else
                                rc = -EOPNOTSUPP;
+                       cifs_put_tlink(tlink);
                        break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
index 56791a6..e44d049 100644 (file)
@@ -844,28 +844,26 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
        struct bio_vec *bv = NULL;
 
        if (iov_iter_is_kvec(iter)) {
-               memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
+               memcpy(&ctx->iter, iter, sizeof(*iter));
                ctx->len = count;
                iov_iter_advance(iter, count);
                return 0;
        }
 
-       if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
-               bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
-                                  GFP_KERNEL);
+       if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
+               bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
 
        if (!bv) {
-               bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
+               bv = vmalloc(array_size(max_pages, sizeof(*bv)));
                if (!bv)
                        return -ENOMEM;
        }
 
-       if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
-               pages = kmalloc_array(max_pages, sizeof(struct page *),
-                                     GFP_KERNEL);
+       if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
+               pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
 
        if (!pages) {
-               pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
+               pages = vmalloc(array_size(max_pages, sizeof(*pages)));
                if (!pages) {
                        kvfree(bv);
                        return -ENOMEM;
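The setup_aio_ctx_iter() change above replaces open-coded size multiplications with array_size() from linux/overflow.h, which saturates to SIZE_MAX on overflow instead of wrapping. A small standalone illustration of why that matters (values hypothetical, not from this patch):

	#include <linux/overflow.h>

	/* An open-coded multiply can wrap on overflow and slip past the
	 * CIFS_AIO_KMALLOC_LIMIT check with an under-sized allocation. */
	size_t raw  = max_pages * sizeof(struct bio_vec);

	/* array_size() saturates to SIZE_MAX on overflow, so the limit check
	 * fails and the vmalloc() fallback then also rejects the request,
	 * returning -ENOMEM instead of under-allocating. */
	size_t safe = array_size(max_pages, sizeof(struct bio_vec));
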
index 6a39451..1579928 100644 (file)
@@ -354,9 +354,13 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
                  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
                break;
        case SMB2_CHANGE_NOTIFY:
+               *off = le16_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
+               *len = le32_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
+               break;
        default:
-               /* BB FIXME for unimplemented cases above */
-               cifs_dbg(VFS, "no length check for command\n");
+               cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
                break;
        }
 
index 736d86b..32f90dc 100644 (file)
@@ -763,6 +763,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
                        /* close extra handle outside of crit sec */
                        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
                }
+               rc = 0;
                goto oshr_free;
        }
 
@@ -2147,7 +2148,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
 
        tcon = cifs_sb_master_tcon(cifs_sb);
        oparms.tcon = tcon;
-       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
        oparms.create_options = cifs_create_options(cifs_sb, 0);
        oparms.fid = &fid;
@@ -3187,6 +3188,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
                              ses->Suid, offset, len);
 
+       /*
+        * We zero the range through ioctl, so we need to remove the page caches
+        * first, otherwise the data may be inconsistent with the server.
+        */
+       truncate_pagecache_range(inode, offset, offset + len - 1);
 
        /* if file not oplocked can't be sure whether asking to extend size */
        if (!CIFS_CACHE_READ(cifsi))
@@ -3253,6 +3259,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
+       /*
+        * We implement the punch hole through ioctl, so we need to remove the page
+        * caches first, otherwise the data may be inconsistent with the server.
+        */
+       truncate_pagecache_range(inode, offset, offset + len - 1);
+
        cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
 
        fsctl_buf.FileOffset = cpu_to_le64(offset);
index d11e310..84433d0 100644 (file)
@@ -523,7 +523,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
 {
-       int rc;
+       long rc;
        int *credits;
        int optype;
        long int t;
index e9e27a2..feaa5e1 100644 (file)
@@ -51,6 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file,
        } else {
                inode_lock(inode);
                i_size_write(inode, datasize + sizeof(attributes));
+               inode->i_mtime = current_time(inode);
                inode_unlock(inode);
        }
 
@@ -72,10 +73,8 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
        ssize_t size = 0;
        int err;
 
-       while (!__ratelimit(&file->f_cred->user->ratelimit)) {
-               if (!msleep_interruptible(50))
-                       return -EINTR;
-       }
+       while (!__ratelimit(&file->f_cred->user->ratelimit))
+               msleep(50);
 
        err = efivar_entry_size(var, &datasize);
 
index 7824f55..9b66c28 100644 (file)
@@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
 static inline void z_erofs_onlinepage_fixup(struct page *page,
        uintptr_t index, bool down)
 {
-       unsigned long *p, o, v, id;
-repeat:
-       p = &page_private(page);
-       o = READ_ONCE(*p);
+       union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+       int orig, orig_index, val;
 
-       id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-       if (id) {
+repeat:
+       orig = atomic_read(u.o);
+       orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+       if (orig_index) {
                if (!index)
                        return;
 
-               DBG_BUGON(id != index);
+               DBG_BUGON(orig_index != index);
        }
 
-       v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
-               ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
-       if (cmpxchg(p, o, v) != o)
+       val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+               ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+       if (atomic_cmpxchg(u.o, orig, val) != orig)
                goto repeat;
 }
 
index de43534..91ece64 100644 (file)
@@ -309,7 +309,7 @@ const struct file_operations exfat_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate        = exfat_iterate,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
 };
 
 int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
@@ -425,10 +425,12 @@ static void exfat_init_name_entry(struct exfat_dentry *ep,
        ep->dentry.name.flags = 0x0;
 
        for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
-               ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
-               if (*uniname == 0x0)
-                       break;
-               uniname++;
+               if (*uniname != 0x0) {
+                       ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+                       uniname++;
+               } else {
+                       ep->dentry.name.unicode_0_14[i] = 0x0;
+               }
        }
 }
 
index 595f311..7579cd3 100644 (file)
@@ -420,6 +420,7 @@ void exfat_truncate(struct inode *inode, loff_t size);
 int exfat_setattr(struct dentry *dentry, struct iattr *attr);
 int exfat_getattr(const struct path *path, struct kstat *stat,
                unsigned int request_mask, unsigned int query_flags);
+int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
 
 /* namei.c */
 extern const struct dentry_operations exfat_dentry_ops;
index fce03f3..3b7fea4 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 #include "exfat_raw.h"
 #include "exfat_fs.h"
@@ -346,12 +347,28 @@ out:
        return error;
 }
 
+int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+       struct inode *inode = filp->f_mapping->host;
+       int err;
+
+       err = __generic_file_fsync(filp, start, end, datasync);
+       if (err)
+               return err;
+
+       err = sync_blockdev(inode->i_sb->s_bdev);
+       if (err)
+               return err;
+
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+}
+
 const struct file_operations exfat_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
 };
index 5b0f353..2b9e210 100644 (file)
@@ -975,7 +975,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
        exfat_chain_set(&clu_to_free, ei->start_clu,
                EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
 
@@ -1002,6 +1001,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
        num_entries++;
        brelse(bh);
 
+       exfat_set_vol_flags(sb, VOL_DIRTY);
        err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
        if (err) {
                exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
@@ -1077,10 +1077,14 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
 
                epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
                        &sector_old);
+               if (!epold)
+                       return -EIO;
                epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
                        &sector_new);
-               if (!epold || !epnew)
+               if (!epnew) {
+                       brelse(old_bh);
                        return -EIO;
+               }
 
                memcpy(epnew, epold, DENTRY_SIZE);
                exfat_update_bh(sb, new_bh, sync);
@@ -1161,10 +1165,14 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
 
        epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
                &sector_mov);
+       if (!epmov)
+               return -EIO;
        epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
                &sector_new);
-       if (!epmov || !epnew)
+       if (!epnew) {
+               brelse(mov_bh);
                return -EIO;
+       }
 
        memcpy(epnew, epmov, DENTRY_SIZE);
        exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
index e650e65..253a924 100644 (file)
@@ -693,10 +693,20 @@ static void exfat_free(struct fs_context *fc)
        }
 }
 
+static int exfat_reconfigure(struct fs_context *fc)
+{
+       fc->sb_flags |= SB_NODIRATIME;
+
+       /* volume flag will be updated in exfat_sync_fs */
+       sync_filesystem(fc->root->d_sb);
+       return 0;
+}
+
 static const struct fs_context_operations exfat_context_ops = {
        .parse_param    = exfat_parse_param,
        .get_tree       = exfat_get_tree,
        .free           = exfat_free,
+       .reconfigure    = exfat_reconfigure,
 };
 
 static int exfat_init_fs_context(struct fs_context *fc)
index e573b0c..83d917f 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/swap.h>
 #include <linux/falloc.h>
 #include <linux/uio.h>
+#include <linux/fs.h>
 
 static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
                                      struct fuse_page_desc **desc)
@@ -1586,7 +1587,6 @@ static void fuse_writepage_finish(struct fuse_conn *fc,
        struct backing_dev_info *bdi = inode_to_bdi(inode);
        int i;
 
-       rb_erase(&wpa->writepages_entry, &fi->writepages);
        for (i = 0; i < ap->num_pages; i++) {
                dec_wb_stat(&bdi->wb, WB_WRITEBACK);
                dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
@@ -1637,6 +1637,7 @@ __acquires(fi->lock)
 
  out_free:
        fi->writectr--;
+       rb_erase(&wpa->writepages_entry, &fi->writepages);
        fuse_writepage_finish(fc, wpa);
        spin_unlock(&fi->lock);
 
@@ -1674,7 +1675,8 @@ __acquires(fi->lock)
        }
 }
 
-static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
+                                               struct fuse_writepage_args *wpa)
 {
        pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
        pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
@@ -1697,11 +1699,17 @@ static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
                else if (idx_to < curr_index)
                        p = &(*p)->rb_left;
                else
-                       return (void) WARN_ON(true);
+                       return curr;
        }
 
        rb_link_node(&wpa->writepages_entry, parent, p);
        rb_insert_color(&wpa->writepages_entry, root);
+       return NULL;
+}
+
+static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+{
+       WARN_ON(fuse_insert_writeback(root, wpa));
 }
 
 static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
@@ -1714,6 +1722,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
 
        mapping_set_error(inode->i_mapping, error);
        spin_lock(&fi->lock);
+       rb_erase(&wpa->writepages_entry, &fi->writepages);
        while (wpa->next) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_write_in *inarg = &wpa->ia.write.in;
@@ -1952,14 +1961,14 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
 }
 
 /*
- * First recheck under fi->lock if the offending offset is still under
- * writeback.  If yes, then iterate auxiliary write requests, to see if there's
+ * Check under fi->lock if the page is under writeback, and insert it onto the
+ * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
  * one already added for a page at this offset.  If there's none, then insert
  * this new request onto the auxiliary list, otherwise reuse the existing one by
- * copying the new page contents over to the old temporary page.
+ * swapping the new temp page with the old one.
  */
-static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
-                                    struct page *page)
+static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
+                              struct page *page)
 {
        struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
        struct fuse_writepage_args *tmp;
@@ -1967,17 +1976,15 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
        struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
 
        WARN_ON(new_ap->num_pages != 0);
+       new_ap->num_pages = 1;
 
        spin_lock(&fi->lock);
-       rb_erase(&new_wpa->writepages_entry, &fi->writepages);
-       old_wpa = fuse_find_writeback(fi, page->index, page->index);
+       old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
        if (!old_wpa) {
-               tree_insert(&fi->writepages, new_wpa);
                spin_unlock(&fi->lock);
-               return false;
+               return true;
        }
 
-       new_ap->num_pages = 1;
        for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
                pgoff_t curr_index;
 
@@ -2006,7 +2013,41 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
                fuse_writepage_free(new_wpa);
        }
 
-       return true;
+       return false;
+}
+
+static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
+                                    struct fuse_args_pages *ap,
+                                    struct fuse_fill_wb_data *data)
+{
+       WARN_ON(!ap->num_pages);
+
+       /*
+        * Being under writeback is unlikely but possible.  For example direct
+        * read to an mmapped fuse file will set the page dirty twice; once when
+        * the pages are faulted with get_user_pages(), and then after the read
+        * completed.
+        */
+       if (fuse_page_is_writeback(data->inode, page->index))
+               return true;
+
+       /* Reached max pages */
+       if (ap->num_pages == fc->max_pages)
+               return true;
+
+       /* Reached max write bytes */
+       if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
+               return true;
+
+       /* Discontinuity */
+       if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
+               return true;
+
+       /* Need to grow the pages array?  If so, did the expansion fail? */
+       if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
+               return true;
+
+       return false;
 }
 
 static int fuse_writepages_fill(struct page *page,
@@ -2019,7 +2060,6 @@ static int fuse_writepages_fill(struct page *page,
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct page *tmp_page;
-       bool is_writeback;
        int err;
 
        if (!data->ff) {
@@ -2029,25 +2069,9 @@ static int fuse_writepages_fill(struct page *page,
                        goto out_unlock;
        }
 
-       /*
-        * Being under writeback is unlikely but possible.  For example direct
-        * read to an mmaped fuse file will set the page dirty twice; once when
-        * the pages are faulted with get_user_pages(), and then after the read
-        * completed.
-        */
-       is_writeback = fuse_page_is_writeback(inode, page->index);
-
-       if (wpa && ap->num_pages &&
-           (is_writeback || ap->num_pages == fc->max_pages ||
-            (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
-            data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
+       if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
                fuse_writepages_send(data);
                data->wpa = NULL;
-       } else if (wpa && ap->num_pages == data->max_pages) {
-               if (!fuse_pages_realloc(data)) {
-                       fuse_writepages_send(data);
-                       data->wpa = NULL;
-               }
        }
 
        err = -ENOMEM;
@@ -2085,12 +2109,6 @@ static int fuse_writepages_fill(struct page *page,
                ap->args.end = fuse_writepage_end;
                ap->num_pages = 0;
                wpa->inode = inode;
-
-               spin_lock(&fi->lock);
-               tree_insert(&fi->writepages, wpa);
-               spin_unlock(&fi->lock);
-
-               data->wpa = wpa;
        }
        set_page_writeback(page);
 
@@ -2098,26 +2116,25 @@ static int fuse_writepages_fill(struct page *page,
        ap->pages[ap->num_pages] = tmp_page;
        ap->descs[ap->num_pages].offset = 0;
        ap->descs[ap->num_pages].length = PAGE_SIZE;
+       data->orig_pages[ap->num_pages] = page;
 
        inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
        inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
        err = 0;
-       if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
+       if (data->wpa) {
+               /*
+                * Protected by fi->lock against concurrent access by
+                * fuse_page_is_writeback().
+                */
+               spin_lock(&fi->lock);
+               ap->num_pages++;
+               spin_unlock(&fi->lock);
+       } else if (fuse_writepage_add(wpa, page)) {
+               data->wpa = wpa;
+       } else {
                end_page_writeback(page);
-               data->wpa = NULL;
-               goto out_unlock;
        }
-       data->orig_pages[ap->num_pages] = page;
-
-       /*
-        * Protected by fi->lock against concurrent access by
-        * fuse_page_is_writeback().
-        */
-       spin_lock(&fi->lock);
-       ap->num_pages++;
-       spin_unlock(&fi->lock);
-
 out_unlock:
        unlock_page(page);
 
@@ -2149,10 +2166,8 @@ static int fuse_writepages(struct address_space *mapping,
 
        err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
        if (data.wpa) {
-               /* Ignore errors if we can write at least one page */
                WARN_ON(!data.wpa->ia.ap.num_pages);
                fuse_writepages_send(&data);
-               err = 0;
        }
        if (data.ff)
                fuse_file_put(data.ff, false, false);
@@ -2761,7 +2776,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                struct iovec *iov = iov_page;
 
                iov->iov_base = (void __user *)arg;
-               iov->iov_len = _IOC_SIZE(cmd);
+
+               switch (cmd) {
+               case FS_IOC_GETFLAGS:
+               case FS_IOC_SETFLAGS:
+                       iov->iov_len = sizeof(int);
+                       break;
+               default:
+                       iov->iov_len = _IOC_SIZE(cmd);
+                       break;
+               }
 
                if (_IOC_DIR(cmd) & _IOC_WRITE) {
                        in_iov = iov;
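The ioctl hunk above special-cases FS_IOC_GETFLAGS/FS_IOC_SETFLAGS because their _IOC_SIZE() encoding claims a long-sized argument while the established ABI passes a pointer to int. A hedged user-space illustration of that calling convention (the FUSE path is hypothetical):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/fuse/file", O_RDONLY);   /* hypothetical FUSE file */
        int flags = 0;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* FS_IOC_GETFLAGS is encoded with a long, but userspace passes an int */
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
                printf("inode flags: %#x\n", flags);
        else
                perror("FS_IOC_GETFLAGS");
        close(fd);
        return 0;
}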
index 5b4aebf..bba7475 100644
@@ -121,10 +121,12 @@ static void fuse_evict_inode(struct inode *inode)
        }
 }
 
-static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
+static int fuse_reconfigure(struct fs_context *fc)
 {
+       struct super_block *sb = fc->root->d_sb;
+
        sync_filesystem(sb);
-       if (*flags & SB_MANDLOCK)
+       if (fc->sb_flags & SB_MANDLOCK)
                return -EINVAL;
 
        return 0;
@@ -475,6 +477,17 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
        struct fuse_fs_context *ctx = fc->fs_private;
        int opt;
 
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+               /*
+                * Ignore options coming from mount(MS_REMOUNT) for backward
+                * compatibility.
+                */
+               if (fc->oldapi)
+                       return 0;
+
+               return invalfc(fc, "No changes allowed in reconfigure");
+       }
+
        opt = fs_parse(fc, fuse_fs_parameters, param, &result);
        if (opt < 0)
                return opt;
@@ -817,7 +830,6 @@ static const struct super_operations fuse_super_operations = {
        .evict_inode    = fuse_evict_inode,
        .write_inode    = fuse_write_inode,
        .drop_inode     = generic_delete_inode,
-       .remount_fs     = fuse_remount_fs,
        .put_super      = fuse_put_super,
        .umount_begin   = fuse_umount_begin,
        .statfs         = fuse_statfs,
@@ -1296,6 +1308,7 @@ static int fuse_get_tree(struct fs_context *fc)
 static const struct fs_context_operations fuse_context_ops = {
        .free           = fuse_free_fc,
        .parse_param    = fuse_parse_param,
+       .reconfigure    = fuse_reconfigure,
        .get_tree       = fuse_get_tree,
 };
 
index 72c9560..68cd700 100644
@@ -468,21 +468,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 }
 
 
-/**
- * __gfs2_readpage - readpage
- * @file: The file to read a page for
- * @page: The page to read
- *
- * This is the core of gfs2's readpage. It's used by the internal file
- * reading code as in that case we already hold the glock. Also it's
- * called by gfs2_readpage() once the required lock has been granted.
- */
-
 static int __gfs2_readpage(void *file, struct page *page)
 {
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
-
        int error;
 
        if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
@@ -505,36 +494,11 @@ static int __gfs2_readpage(void *file, struct page *page)
  * gfs2_readpage - read a page of a file
  * @file: The file to read
  * @page: The page of the file
- *
- * This deals with the locking required. We have to unlock and
- * relock the page in order to get the locking in the right
- * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct gfs2_inode *ip = GFS2_I(mapping->host);
-       struct gfs2_holder gh;
-       int error;
-
-       unlock_page(page);
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       error = gfs2_glock_nq(&gh);
-       if (unlikely(error))
-               goto out;
-       error = AOP_TRUNCATED_PAGE;
-       lock_page(page);
-       if (page->mapping == mapping && !PageUptodate(page))
-               error = __gfs2_readpage(file, page);
-       else
-               unlock_page(page);
-       gfs2_glock_dq(&gh);
-out:
-       gfs2_holder_uninit(&gh);
-       if (error && error != AOP_TRUNCATED_PAGE)
-               lock_page(page);
-       return error;
+       return __gfs2_readpage(file, page);
 }
 
 /**
@@ -598,16 +562,9 @@ static void gfs2_readahead(struct readahead_control *rac)
 {
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_holder gh;
 
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       if (gfs2_glock_nq(&gh))
-               goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                mpage_readahead(rac, gfs2_block_map);
-       gfs2_glock_dq(&gh);
-out_uninit:
-       gfs2_holder_uninit(&gh);
 }
 
 /**
index fe305e4..bebde53 100644
@@ -558,8 +558,29 @@ out_uninit:
        return block_page_mkwrite_return(ret);
 }
 
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+       struct inode *inode = file_inode(vmf->vma->vm_file);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_holder gh;
+       vm_fault_t ret;
+       int err;
+
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       err = gfs2_glock_nq(&gh);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
+               goto out_uninit;
+       }
+       ret = filemap_fault(vmf);
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return ret;
+}
+
 static const struct vm_operations_struct gfs2_vm_ops = {
-       .fault = filemap_fault,
+       .fault = gfs2_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = gfs2_page_mkwrite,
 };
@@ -824,6 +845,9 @@ out_uninit:
 
 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct gfs2_inode *ip;
+       struct gfs2_holder gh;
+       size_t written = 0;
        ssize_t ret;
 
        if (iocb->ki_flags & IOCB_DIRECT) {
@@ -832,7 +856,31 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                        return ret;
                iocb->ki_flags &= ~IOCB_DIRECT;
        }
-       return generic_file_read_iter(iocb, to);
+       iocb->ki_flags |= IOCB_NOIO;
+       ret = generic_file_read_iter(iocb, to);
+       iocb->ki_flags &= ~IOCB_NOIO;
+       if (ret >= 0) {
+               if (!iov_iter_count(to))
+                       return ret;
+               written = ret;
+       } else {
+               if (ret != -EAGAIN)
+                       return ret;
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return ret;
+       }
+       ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       ret = gfs2_glock_nq(&gh);
+       if (ret)
+               goto out_uninit;
+       ret = generic_file_read_iter(iocb, to);
+       if (ret > 0)
+               written += ret;
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return written ? written : ret;
 }
 
 /**
index 2299dcc..8545024 100644
@@ -1899,7 +1899,10 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
 
 static void flush_delete_work(struct gfs2_glock *gl)
 {
-       flush_delayed_work(&gl->gl_delete);
+       if (cancel_delayed_work(&gl->gl_delete)) {
+               queue_delayed_work(gfs2_delete_workqueue,
+                                  &gl->gl_delete, 0);
+       }
        gfs2_glock_queue_work(gl, 0);
 }
 
index c848877..de1d5f1 100644
@@ -531,8 +531,7 @@ static int freeze_go_sync(struct gfs2_glock *gl)
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-       if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
-           test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+       if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
@@ -545,8 +544,11 @@ static int freeze_go_sync(struct gfs2_glock *gl)
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-               gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-                              GFS2_LFC_FREEZE_GO_SYNC);
+               if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+                       gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+                                      GFS2_LFC_FREEZE_GO_SYNC);
+               else /* read-only mounts */
+                       atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
        return 0;
 }
index 03ab11f..ca2ec02 100644
@@ -399,7 +399,6 @@ enum {
        GIF_QD_LOCKED           = 1,
        GIF_ALLOC_FAILED        = 2,
        GIF_SW_PAGED            = 3,
-       GIF_ORDERED             = 4,
        GIF_FREE_VFS_INODE      = 5,
        GIF_GLOP_PENDING        = 6,
        GIF_DEFERRED_DELETE     = 7,
index 370c3a4..6774865 100644
@@ -207,10 +207,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
+               error = -ESTALE;
                if (inode->i_state & I_NEW)
                        goto fail;
                iput(inode);
-               return ERR_PTR(-ESTALE);
+               return ERR_PTR(error);
        }
 
        if (inode->i_state & I_NEW)
index 3e47344..a76e55b 100644
@@ -613,6 +613,12 @@ static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
        return 0;
 }
 
+static void __ordered_del_inode(struct gfs2_inode *ip)
+{
+       if (!list_empty(&ip->i_ordered))
+               list_del_init(&ip->i_ordered);
+}
+
 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *ip;
@@ -623,8 +629,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
-                       test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
-                       list_del(&ip->i_ordered);
+                       __ordered_del_inode(ip);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
@@ -643,8 +648,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
-               list_del(&ip->i_ordered);
-               WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+               __ordered_del_inode(ip);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
@@ -659,8 +663,7 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
        spin_lock(&sdp->sd_ordered_lock);
-       if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
-               list_del(&ip->i_ordered);
+       __ordered_del_inode(ip);
        spin_unlock(&sdp->sd_ordered_lock);
 }
 
@@ -1002,6 +1005,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 
 out:
        if (gfs2_withdrawn(sdp)) {
+               /*
+                * If the tr_list is empty, we're withdrawing during a log
+                * flush that targets a transaction, but the transaction was
+                * never queued onto any of the ail lists. Here we add it to
+                * ail1 just so that ail_drain() will find and free it.
+                */
+               spin_lock(&sdp->sd_ail_lock);
+               if (tr && list_empty(&tr->tr_list))
+                       list_add(&tr->tr_list, &sdp->sd_ail1_list);
+               spin_unlock(&sdp->sd_ail_lock);
                ail_drain(sdp); /* frees all transactions */
                tr = NULL;
        }
index c1cd6ae..8965c75 100644
@@ -53,9 +53,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
        if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
                return;
 
-       if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+       if (list_empty(&ip->i_ordered)) {
                spin_lock(&sdp->sd_ordered_lock);
-               if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+               if (list_empty(&ip->i_ordered))
                        list_add(&ip->i_ordered, &sdp->sd_log_ordered);
                spin_unlock(&sdp->sd_ordered_lock);
        }
index 733470c..c7393ee 100644
@@ -39,6 +39,7 @@ static void gfs2_init_inode_once(void *foo)
        atomic_set(&ip->i_sizehint, 0);
        init_rwsem(&ip->i_rw_mutex);
        INIT_LIST_HEAD(&ip->i_trunc_list);
+       INIT_LIST_HEAD(&ip->i_ordered);
        ip->i_qadata = NULL;
        gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
        memset(&ip->i_res, 0, sizeof(ip->i_res));
index 094f5fe..6d18d2c 100644
@@ -1136,7 +1136,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
                goto fail_per_node;
        }
 
-       if (!sb_rdonly(sb)) {
+       if (sb_rdonly(sb)) {
+               struct gfs2_holder freeze_gh;
+
+               error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                          LM_FLAG_NOEXP | GL_EXACT,
+                                          &freeze_gh);
+               if (error) {
+                       fs_err(sdp, "can't make FS RO: %d\n", error);
+                       goto fail_per_node;
+               }
+               gfs2_glock_dq_uninit(&freeze_gh);
+       } else {
                error = gfs2_make_fs_rw(sdp);
                if (error) {
                        fs_err(sdp, "can't make FS RW: %d\n", error);
index 96c345f..390ea79 100644
@@ -364,8 +364,8 @@ void gfs2_recover_func(struct work_struct *work)
                /* Acquire a shared hold on the freeze lock */
 
                error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY,
-                                          &thaw_gh);
+                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+                                          GL_EXACT, &thaw_gh);
                if (error)
                        goto fail_gunlock_ji;
 
index 32d8d26..47d0ae1 100644
@@ -167,7 +167,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        if (error)
                return error;
 
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT,
                                   &freeze_gh);
        if (error)
                goto fail_threads;
@@ -203,7 +204,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        return 0;
 
 fail:
-       freeze_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&freeze_gh);
 fail_threads:
        if (sdp->sd_quotad_process)
@@ -430,7 +430,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
        }
 
        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
-                                  GL_NOCACHE, &sdp->sd_freeze_gh);
+                                  LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
        if (error)
                goto out;
 
@@ -613,13 +613,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
            !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
                if (!log_write_allowed) {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE |
-                                                  LM_FLAG_TRY, &freeze_gh);
+                                                  LM_ST_SHARED, LM_FLAG_TRY |
+                                                  LM_FLAG_NOEXP | GL_EXACT,
+                                                  &freeze_gh);
                        if (error == GLR_TRYFAILED)
                                error = 0;
                } else {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE,
+                                                  LM_ST_SHARED,
+                                                  LM_FLAG_NOEXP | GL_EXACT,
                                                   &freeze_gh);
                        if (error && !gfs2_withdrawn(sdp))
                                return error;
@@ -761,8 +763,8 @@ void gfs2_freeze_func(struct work_struct *work)
        struct super_block *sb = sdp->sd_vfs;
 
        atomic_inc(&sb->s_active);
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-                                  &freeze_gh);
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
        if (error) {
                fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
                gfs2_assert_withdraw(sdp, 0);
@@ -774,8 +776,6 @@ void gfs2_freeze_func(struct work_struct *work)
                                error);
                        gfs2_assert_withdraw(sdp, 0);
                }
-               if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-                       freeze_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&freeze_gh);
        }
        deactivate_super(sb);
index a78201b..74bc4a0 100644
@@ -890,6 +890,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
                                 unsigned nr_args);
 static int io_grab_files(struct io_kiocb *req);
+static void io_complete_rw_common(struct kiocb *kiocb, long res);
 static void io_cleanup_req(struct io_kiocb *req);
 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
                       int fd, struct file **out_file, bool fixed);
@@ -1095,6 +1096,8 @@ static inline void io_prep_async_work(struct io_kiocb *req,
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
 
+       io_req_init_async(req);
+
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file)
                        io_wq_hash_work(&req->work, file_inode(req->file));
@@ -1103,7 +1106,6 @@ static inline void io_prep_async_work(struct io_kiocb *req,
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
-       io_req_init_async(req);
        io_req_work_grab_env(req, def);
 
        *link = io_prep_linked_timeout(req);
@@ -1273,6 +1275,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        if (cqe) {
                clear_bit(0, &ctx->sq_check_overflow);
                clear_bit(0, &ctx->cq_check_overflow);
+               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
        }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);
@@ -1310,6 +1313,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
                        set_bit(0, &ctx->cq_check_overflow);
+                       ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                req->flags |= REQ_F_OVERFLOW;
                refcount_inc(&req->refs);
@@ -1749,6 +1753,14 @@ static void io_iopoll_queue(struct list_head *again)
        do {
                req = list_first_entry(again, struct io_kiocb, list);
                list_del(&req->list);
+
+               /* shouldn't happen unless io_uring is dying, cancel reqs */
+               if (unlikely(!current->mm)) {
+                       io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
+                       io_put_req(req);
+                       continue;
+               }
+
                refcount_inc(&req->refs);
                io_queue_async_work(req);
        } while (!list_empty(again));
@@ -1994,10 +2006,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 
        WRITE_ONCE(req->result, res);
        /* order with io_poll_complete() checking ->result */
-       if (res != -EAGAIN) {
-               smp_wmb();
-               WRITE_ONCE(req->iopoll_completed, 1);
-       }
+       smp_wmb();
+       WRITE_ONCE(req->iopoll_completed, 1);
 }
 
 /*
@@ -3544,6 +3554,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (req->flags & REQ_F_NEED_CLEANUP)
                return 0;
 
+       io->msg.msg.msg_name = &io->msg.addr;
        io->msg.iov = io->msg.fast_iov;
        ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
                                        &io->msg.iov);
@@ -3725,6 +3736,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 
 static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
 {
+       io->msg.msg.msg_name = &io->msg.addr;
        io->msg.iov = io->msg.fast_iov;
 
 #ifdef CONFIG_COMPAT
@@ -3833,10 +3845,16 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
 
                ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
                                                kmsg->uaddr, flags);
-               if (force_nonblock && ret == -EAGAIN)
-                       return io_setup_async_msg(req, kmsg);
+               if (force_nonblock && ret == -EAGAIN) {
+                       ret = io_setup_async_msg(req, kmsg);
+                       if (ret != -EAGAIN)
+                               kfree(kbuf);
+                       return ret;
+               }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
+               if (kbuf)
+                       kfree(kbuf);
        }
 
        if (kmsg && kmsg->iov != kmsg->fast_iov)
@@ -4065,6 +4083,29 @@ struct io_poll_table {
        int error;
 };
 
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+       struct task_struct *tsk = req->task;
+       struct io_ring_ctx *ctx = req->ctx;
+       int ret, notify = TWA_RESUME;
+
+       /*
+        * SQPOLL kernel thread doesn't need notification, just a wakeup.
+        * If we're not using an eventfd, then TWA_RESUME is always fine,
+        * as we won't have dependencies between request completions for
+        * other kernel wait conditions.
+        */
+       if (ctx->flags & IORING_SETUP_SQPOLL)
+               notify = 0;
+       else if (ctx->cq_ev_fd)
+               notify = TWA_SIGNAL;
+
+       ret = task_work_add(tsk, cb, notify);
+       if (!ret)
+               wake_up_process(tsk);
+       return ret;
+}
+
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
                           __poll_t mask, task_work_func_t func)
 {
@@ -4088,13 +4129,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
         * of executing it. We can't safely execute it anyway, as we may not
         * have the needed state needed for it anyway.
         */
-       ret = task_work_add(tsk, &req->task_work, true);
+       ret = io_req_task_work_add(req, &req->task_work);
        if (unlikely(ret)) {
                WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, true);
+               task_work_add(tsk, &req->task_work, 0);
+               wake_up_process(tsk);
        }
-       wake_up_process(tsk);
        return 1;
 }
 
@@ -5353,9 +5394,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
                const bool in_async = io_wq_current_is_worker();
 
-               if (req->result == -EAGAIN)
-                       return -EAGAIN;
-
                /* workqueue context doesn't hold uring_lock, grab it now */
                if (in_async)
                        mutex_lock(&ctx->uring_lock);
@@ -6011,7 +6049,7 @@ static int io_sq_thread(void *data)
                 * If submit got -EBUSY, flag us as needing the application
                 * to enter the kernel to reap and flush events.
                 */
-               if (!to_submit || ret == -EBUSY) {
+               if (!to_submit || ret == -EBUSY || need_resched()) {
                        /*
                         * Drop cur_mm before scheduling, we can't hold it for
                         * long periods (or over schedule()). Do this before
@@ -6027,7 +6065,7 @@ static int io_sq_thread(void *data)
                         * more IO, we should wait for the application to
                         * reap events and wake us up.
                         */
-                       if (!list_empty(&ctx->poll_list) ||
+                       if (!list_empty(&ctx->poll_list) || need_resched() ||
                            (!time_after(jiffies, timeout) && ret != -EBUSY &&
                            !percpu_ref_is_dying(&ctx->refs))) {
                                if (current->task_works)
@@ -6053,9 +6091,9 @@ static int io_sq_thread(void *data)
                        }
 
                        /* Tell userspace we may need a wakeup call */
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
-                       /* make sure to read SQ tail after writing flags */
-                       smp_mb();
+                       spin_unlock_irq(&ctx->completion_lock);
 
                        to_submit = io_sqring_entries(ctx);
                        if (!to_submit || ret == -EBUSY) {
@@ -6073,13 +6111,17 @@ static int io_sq_thread(void *data)
                                schedule();
                                finish_wait(&ctx->sqo_wait, &wait);
 
+                               spin_lock_irq(&ctx->completion_lock);
                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                               spin_unlock_irq(&ctx->completion_lock);
                                ret = 0;
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
 
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                       spin_unlock_irq(&ctx->completion_lock);
                }
 
                mutex_lock(&ctx->uring_lock);
@@ -6178,15 +6220,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        do {
                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
+               /* make sure we run task_work before checking for signals */
                if (current->task_works)
                        task_work_run();
-               if (io_should_wake(&iowq, false))
-                       break;
-               schedule();
                if (signal_pending(current)) {
+                       if (current->jobctl & JOBCTL_TASK_WORK) {
+                               spin_lock_irq(&current->sighand->siglock);
+                               current->jobctl &= ~JOBCTL_TASK_WORK;
+                               recalc_sigpending();
+                               spin_unlock_irq(&current->sighand->siglock);
+                               continue;
+                       }
                        ret = -EINTR;
                        break;
                }
+               if (io_should_wake(&iowq, false))
+                       break;
+               schedule();
        } while (1);
        finish_wait(&ctx->wait, &iowq.wq);
 
@@ -6658,6 +6708,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                for (i = 0; i < nr_tables; i++)
                        kfree(ctx->file_data->table[i].files);
 
+               percpu_ref_exit(&ctx->file_data->refs);
                kfree(ctx->file_data->table);
                kfree(ctx->file_data);
                ctx->file_data = NULL;
@@ -6810,8 +6861,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                        }
                        table->files[index] = file;
                        err = io_sqe_file_register(ctx, file, i);
-                       if (err)
+                       if (err) {
+                               fput(file);
                                break;
+                       }
                }
                nr_args--;
                done++;
@@ -7307,9 +7360,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_mem_free(ctx->sq_sqes);
 
        percpu_ref_exit(&ctx->refs);
-       if (ctx->account_mem)
-               io_unaccount_mem(ctx->user,
-                               ring_pages(ctx->sq_entries, ctx->cq_entries));
        free_uid(ctx->user);
        put_cred(ctx->creds);
        kfree(ctx->cancel_hash);
@@ -7394,6 +7444,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);
        idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+
+       /*
+        * Do this upfront, so we won't have a grace period where the ring
+        * is closed but resources aren't reaped yet. This can cause
+        * spurious failure in setting up a new ring.
+        */
+       if (ctx->account_mem)
+               io_unaccount_mem(ctx->user,
+                               ring_pages(ctx->sq_entries, ctx->cq_entries));
+
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        queue_work(system_wq, &ctx->exit_work);
 }
@@ -7453,6 +7513,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                        if (list_empty(&ctx->cq_overflow_list)) {
                                clear_bit(0, &ctx->sq_check_overflow);
                                clear_bit(0, &ctx->cq_check_overflow);
+                               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
                        }
                        spin_unlock_irq(&ctx->completion_lock);
 
index f30ed40..4a0f600 100644
@@ -2603,6 +2603,7 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
        if (IS_ERR(fc))
                return PTR_ERR(fc);
 
+       fc->oldapi = true;
        err = parse_monolithic_mount_data(fc, data);
        if (!err) {
                down_write(&sb->s_umount);
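The fs/namespace hunk above marks legacy mount(2) remounts with fc->oldapi; together with the fuse_parse_param change earlier, option strings passed through the old remount interface keep being silently ignored for backward compatibility, while the new fsconfig(2) API rejects them. A hedged user-space sketch of such a legacy remount (the mount point is hypothetical and the call needs CAP_SYS_ADMIN):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /*
         * Legacy remount via mount(2); source and fstype are ignored for
         * MS_REMOUNT.  With fc->oldapi set, filesystems such as fuse keep
         * ignoring the option string instead of failing the remount.
         */
        if (mount(NULL, "/mnt/fuse", NULL, MS_REMOUNT | MS_RDONLY,
                  "some_ignored_option") == -1) {
                perror("mount(MS_REMOUNT)");
                return 1;
        }
        return 0;
}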
index 7d399f7..de03e44 100644
@@ -907,9 +907,8 @@ retry:
                goto out_mds;
 
        /* Use a direct mapping of ds_idx to pgio mirror_idx */
-       if (WARN_ON_ONCE(pgio->pg_mirror_count !=
-           FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
-               goto out_mds;
+       if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
+               goto out_eagain;
 
        for (i = 0; i < pgio->pg_mirror_count; i++) {
                mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -931,7 +930,10 @@ retry:
                        (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
                pgio->pg_maxretrans = io_maxretrans;
        return;
-
+out_eagain:
+       pnfs_generic_pg_cleanup(pgio);
+       pgio->pg_error = -EAGAIN;
+       return;
 out_mds:
        trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
                        0, NFS4_MAX_UINT64, IOMODE_RW,
@@ -941,6 +943,7 @@ out_mds:
        pgio->pg_lseg = NULL;
        pgio->pg_maxretrans = 0;
        nfs_pageio_reset_write_mds(pgio);
+       pgio->pg_error = -EAGAIN;
 }
 
 static unsigned int
index a3ab6e2..8733423 100644
@@ -308,6 +308,7 @@ static int try_location(struct fs_context *fc,
        if (IS_ERR(export_path))
                return PTR_ERR(export_path);
 
+       kfree(ctx->nfs_server.export_path);
        ctx->nfs_server.export_path = export_path;
 
        source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1,
index e32717f..2e2dac2 100644
@@ -774,6 +774,14 @@ static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
        slot->seq_nr_last_acked = seqnr;
 }
 
+static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
+                               struct nfs4_slot *slot)
+{
+       struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
+       if (!IS_ERR(task))
+               rpc_put_task_async(task);
+}
+
 static int nfs41_sequence_process(struct rpc_task *task,
                struct nfs4_sequence_res *res)
 {
@@ -790,6 +798,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
                goto out;
 
        session = slot->table->session;
+       clp = session->clp;
 
        trace_nfs4_sequence_done(session, res);
 
@@ -804,7 +813,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
                nfs4_slot_sequence_acked(slot, slot->seq_nr);
                /* Update the slot's sequence and clientid lease timer */
                slot->seq_done = 1;
-               clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
                nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
@@ -852,10 +860,18 @@ static int nfs41_sequence_process(struct rpc_task *task,
                /*
                 * Were one or more calls using this slot interrupted?
                 * If the server never received the request, then our
-                * transmitted slot sequence number may be too high.
+                * transmitted slot sequence number may be too high. However,
+                * if the server did receive the request then it might
+                * accidentally give us a reply with a mismatched operation.
+                * We can sort this out by sending a lone sequence operation
+                * to the server on the same slot.
                 */
                if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
                        slot->seq_nr--;
+                       if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
+                               nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
+                               res->sr_slot = NULL;
+                       }
                        goto retry_nowait;
                }
                /*
index bb3d2c3..cce2510 100644
@@ -7912,9 +7912,14 @@ nfs4_state_start_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        int ret;
 
-       ret = nfs4_state_create_net(net);
+       ret = get_nfsdfs(net);
        if (ret)
                return ret;
+       ret = nfs4_state_create_net(net);
+       if (ret) {
+               mntput(nn->nfsd_mnt);
+               return ret;
+       }
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
        if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
@@ -7984,6 +7989,7 @@ nfs4_state_shutdown_net(struct net *net)
 
        nfsd4_client_tracking_exit(net);
        nfs4_state_destroy_net(net);
+       mntput(nn->nfsd_mnt);
 }
 
 void
index b68e966..cd05732 100644
@@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
        WARN_ON_ONCE(ret);
        fsnotify_rmdir(dir, dentry);
        d_delete(dentry);
+       dput(dentry);
        inode_unlock(dir);
 }
 
@@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = {
 };
 MODULE_ALIAS_FS("nfsd");
 
+int get_nfsdfs(struct net *net)
+{
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct vfsmount *mnt;
+
+       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+       if (IS_ERR(mnt))
+               return PTR_ERR(mnt);
+       nn->nfsd_mnt = mnt;
+       return 0;
+}
+
 #ifdef CONFIG_PROC_FS
 static int create_proc_exports_entry(void)
 {
@@ -1451,7 +1464,6 @@ unsigned int nfsd_net_id;
 static __net_init int nfsd_init_net(struct net *net)
 {
        int retval;
-       struct vfsmount *mnt;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        retval = nfsd_export_init(net);
@@ -1478,16 +1490,8 @@ static __net_init int nfsd_init_net(struct net *net)
        init_waitqueue_head(&nn->ntf_wq);
        seqlock_init(&nn->boot_lock);
 
-       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
-       if (IS_ERR(mnt)) {
-               retval = PTR_ERR(mnt);
-               goto out_mount_err;
-       }
-       nn->nfsd_mnt = mnt;
        return 0;
 
-out_mount_err:
-       nfsd_reply_cache_shutdown(nn);
 out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
@@ -1500,7 +1504,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       mntput(nn->nfsd_mnt);
        nfsd_reply_cache_shutdown(nn);
        nfsd_idmap_shutdown(net);
        nfsd_export_shutdown(net);
index 36cdd81..57c832d 100644
@@ -90,6 +90,8 @@ void          nfsd_destroy(struct net *net);
 
 bool           i_am_nfsd(void);
 
+int get_nfsdfs(struct net *);
+
 struct nfsdfs_client {
        struct kref cl_ref;
        void (*cl_release)(struct kref *kref);
@@ -100,6 +102,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
                struct nfsdfs_client *ncl, u32 id, const struct tree_descr *);
 void nfsd_client_rmdir(struct dentry *dentry);
 
+
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
 #ifdef CONFIG_NFSD_V2_ACL
 extern const struct svc_version nfsd_acl_version2;
index c3fbab1..d22a056 100644
@@ -1226,6 +1226,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
                iap->ia_mode = 0;
        iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        err = 0;
        host_err = 0;
        switch (type) {
@@ -1458,6 +1461,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out;
        }
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
        if (host_err < 0) {
                fh_drop_write(fhp);
index 152a0fc..751bc4d 100644
@@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
                                   &ocfs2_nfs_sync_lops, osb);
 }
 
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       init_rwsem(&osb->nfs_sync_rwlock);
+}
+
 void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
 {
        struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
@@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;
 
+       if (ex)
+               down_write(&osb->nfs_sync_rwlock);
+       else
+               down_read(&osb->nfs_sync_rwlock);
+
        if (ocfs2_mount_local(osb))
                return 0;
 
@@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, lockres,
                                     ex ? LKM_EXMODE : LKM_PRMODE);
+       if (ex)
+               up_write(&osb->nfs_sync_rwlock);
+       else
+               up_read(&osb->nfs_sync_rwlock);
 }
 
 int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
@@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
 local:
        ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
        ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
-       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       ocfs2_nfs_sync_lock_init(osb);
        ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
        osb->cconn = conn;
index ee5d985..2dd71d6 100644
@@ -395,6 +395,7 @@ struct ocfs2_super
        struct ocfs2_lock_res osb_super_lockres;
        struct ocfs2_lock_res osb_rename_lockres;
        struct ocfs2_lock_res osb_nfs_sync_lockres;
+       struct rw_semaphore nfs_sync_rwlock;
        struct ocfs2_lock_res osb_trim_fs_lockres;
        struct mutex obs_trim_fs_mutex;
        struct ocfs2_dlm_debug *osb_dlm_debug;
index 0dd8c41..19137c6 100644
 #define OCFS2_MAX_SLOTS                        255
 
 /* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT             -1
+#define OCFS2_INVALID_SLOT             ((u16)-1)
 
 #define OCFS2_VOL_UUID_LEN             16
 #define OCFS2_MAX_VOL_LABEL_LEN                64
@@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
 enum {
        BAD_BLOCK_SYSTEM_INODE = 0,
        GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
        SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
        HEARTBEAT_SYSTEM_INODE,
        GLOBAL_BITMAP_SYSTEM_INODE,
        USER_QUOTA_SYSTEM_INODE,
index 4836bec..45745cc 100644
@@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
                goto bail;
        }
 
-       inode_alloc_inode =
-               ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
-                                           suballoc_slot);
+       if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+       else
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
        if (!inode_alloc_inode) {
                /* the error code could be inaccurate, but we are not able to
                 * get the correct one. */
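The ocfs2_fs.h change above makes OCFS2_INVALID_SLOT an explicit u16 so that comparisons against 16-bit slot numbers, as in the suballoc.c hunk here, behave as expected: with a plain -1, integer promotion makes such a comparison never match. A small, self-contained demonstration of that pitfall (illustrative names, not ocfs2 code):

#include <stdint.h>
#include <stdio.h>

#define INVALID_SLOT_OLD (-1)            /* old definition */
#define INVALID_SLOT_NEW ((uint16_t)-1)  /* new definition, i.e. 0xffff */

int main(void)
{
        uint16_t slot = (uint16_t)-1;   /* "invalid" slot as stored on disk */

        /* slot promotes to int 65535, -1 stays -1: never equal */
        printf("old compare: %d\n", slot == INVALID_SLOT_OLD);
        /* both sides promote to 65535: matches as intended */
        printf("new compare: %d\n", slot == INVALID_SLOT_NEW);
        return 0;
}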
index 79dd052..5e0cde8 100644
@@ -895,7 +895,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        return err;
 }
 
-int ovl_copy_up_flags(struct dentry *dentry, int flags)
+static int ovl_copy_up_flags(struct dentry *dentry, int flags)
 {
        int err = 0;
        const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
index 8f42864..0e696f7 100644
@@ -476,7 +476,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
        if (IS_ERR_OR_NULL(this))
                return this;
 
-       if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) {
+       if (ovl_dentry_real_at(this, layer->idx) != real) {
                dput(this);
                this = ERR_PTR(-EIO);
        }
index 01820e6..0d940e2 100644
@@ -33,13 +33,16 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode)
                return 'm';
 }
 
+/* No atime modification nor notify on underlying */
+#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY)
+
 static struct file *ovl_open_realfile(const struct file *file,
                                      struct inode *realinode)
 {
        struct inode *inode = file_inode(file);
        struct file *realfile;
        const struct cred *old_cred;
-       int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
+       int flags = file->f_flags | OVL_OPEN_FLAGS;
        int acc_mode = ACC_MODE(flags);
        int err;
 
@@ -72,8 +75,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
        struct inode *inode = file_inode(file);
        int err;
 
-       /* No atime modificaton on underlying */
-       flags |= O_NOATIME | FMODE_NONOTIFY;
+       flags |= OVL_OPEN_FLAGS;
 
        /* If some flag changed that cannot be changed then something's amiss */
        if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
@@ -126,7 +128,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
        }
 
        /* Did the flags change since open? */
-       if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME))
+       if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS))
                return ovl_change_flags(real->file, file->f_flags);
 
        return 0;
index 3566282..f7d4358 100644
@@ -389,7 +389,7 @@ invalid:
 }
 
 static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry,
-                           struct ovl_path **stackp, unsigned int *ctrp)
+                           struct ovl_path **stackp)
 {
        struct ovl_fh *fh = ovl_get_fh(upperdentry, OVL_XATTR_ORIGIN);
        int err;
@@ -406,10 +406,6 @@ static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry,
                return err;
        }
 
-       if (WARN_ON(*ctrp))
-               return -EIO;
-
-       *ctrp = 1;
        return 0;
 }
 
@@ -861,8 +857,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                        goto out;
                }
                if (upperdentry && !d.is_dir) {
-                       unsigned int origin_ctr = 0;
-
                        /*
                         * Lookup copy up origin by decoding origin file handle.
                         * We may get a disconnected dentry, which is fine,
@@ -873,8 +867,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                         * number - it's the same as if we held a reference
                         * to a dentry in lower layer that was moved under us.
                         */
-                       err = ovl_check_origin(ofs, upperdentry, &origin_path,
-                                              &origin_ctr);
+                       err = ovl_check_origin(ofs, upperdentry, &origin_path);
                        if (err)
                                goto out_put_upper;
 
@@ -1073,6 +1066,10 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                        upperredirect = NULL;
                        goto out_free_oe;
                }
+               err = ovl_check_metacopy_xattr(upperdentry);
+               if (err < 0)
+                       goto out_free_oe;
+               uppermetacopy = err;
        }
 
        if (upperdentry || ctr) {
index b725c7f..29bc1ec 100644
@@ -483,7 +483,6 @@ void ovl_aio_request_cache_destroy(void);
 /* copy_up.c */
 int ovl_copy_up(struct dentry *dentry);
 int ovl_copy_up_with_data(struct dentry *dentry);
-int ovl_copy_up_flags(struct dentry *dentry, int flags);
 int ovl_maybe_copy_up(struct dentry *dentry, int flags);
 int ovl_copy_xattr(struct dentry *old, struct dentry *new);
 int ovl_set_attr(struct dentry *upper, struct kstat *stat);
index 91476bc..4b38141 100644
@@ -580,12 +580,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                }
        }
 
-       /* Workdir is useless in non-upper mount */
-       if (!config->upperdir && config->workdir) {
-               pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
-                       config->workdir);
-               kfree(config->workdir);
-               config->workdir = NULL;
+       /* Workdir/index are useless in non-upper mount */
+       if (!config->upperdir) {
+               if (config->workdir) {
+                       pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+                               config->workdir);
+                       kfree(config->workdir);
+                       config->workdir = NULL;
+               }
+               if (config->index && index_opt) {
+                       pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n");
+                       index_opt = false;
+               }
+               config->index = false;
        }
 
        err = ovl_parse_redirect_mode(config, config->redirect_mode);
@@ -622,11 +629,13 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 
        /* Resolve nfs_export -> index dependency */
        if (config->nfs_export && !config->index) {
-               if (nfs_export_opt && index_opt) {
+               if (!config->upperdir && config->redirect_follow) {
+                       pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
+                       config->nfs_export = false;
+               } else if (nfs_export_opt && index_opt) {
                        pr_err("conflicting options: nfs_export=on,index=off\n");
                        return -EINVAL;
-               }
-               if (index_opt) {
+               } else if (index_opt) {
                        /*
                         * There was an explicit index=off that resulted
                         * in this conflict.
@@ -1352,8 +1361,15 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
                goto out;
        }
 
+       /* index dir will act also as workdir */
+       iput(ofs->workdir_trap);
+       ofs->workdir_trap = NULL;
+       dput(ofs->workdir);
+       ofs->workdir = NULL;
        ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
        if (ofs->indexdir) {
+               ofs->workdir = dget(ofs->indexdir);
+
                err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
                                     "indexdir");
                if (err)
@@ -1396,6 +1412,18 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
        if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
                return true;
 
+       /*
+        * We allow using single lower with null uuid for index and nfs_export
+        * for example to support those features with single lower squashfs.
+        * To avoid regressions in setups of overlay with re-formatted lower
+        * squashfs, do not allow decoding origin with lower null uuid unless
+        * user opted-in to one of the new features that require following the
+        * lower inode of non-dir upper.
+        */
+       if (!ofs->config.index && !ofs->config.metacopy && !ofs->config.xino &&
+           uuid_is_null(uuid))
+               return false;
+
        for (i = 0; i < ofs->numfs; i++) {
                /*
                 * We use uuid to associate an overlay lower file handle with a
@@ -1493,14 +1521,23 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
                if (err < 0)
                        goto out;
 
+               /*
+                * Check if lower root conflicts with this overlay layers before
+                * checking if it is in-use as upperdir/workdir of "another"
+                * mount, because we do not bother to check in ovl_is_inuse() if
+                * the upperdir/workdir is in fact in-use by our
+                * upperdir/workdir.
+                */
                err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
                if (err)
                        goto out;
 
                if (ovl_is_inuse(stack[i].dentry)) {
                        err = ovl_report_in_use(ofs, "lowerdir");
-                       if (err)
+                       if (err) {
+                               iput(trap);
                                goto out;
+                       }
                }
 
                mnt = clone_private_mount(&stack[i]);
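
The added iput(trap) above enforces the usual error-path rule: a reference taken by an earlier step must be released when a later check fails. A hedged, generic restatement of that shape; every name below is hypothetical, only the rule comes from the hunk.

#include <linux/fs.h>

struct example_dev {
	struct inode *trap;
};

/* hypothetical helpers standing in for the trap setup and in-use check */
int example_take_trap(struct example_dev *dev, struct inode **trap);
int example_check_in_use(struct example_dev *dev);

static int example_claim_resource(struct example_dev *dev)
{
	struct inode *trap;
	int err;

	err = example_take_trap(dev, &trap);	/* grabs an inode reference */
	if (err)
		return err;

	err = example_check_in_use(dev);
	if (err) {
		iput(trap);			/* release it again on failure */
		return err;
	}

	dev->trap = trap;
	return 0;
}
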
@@ -1575,10 +1612,6 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
        if (!ofs->config.upperdir && numlower == 1) {
                pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
                return ERR_PTR(-EINVAL);
-       } else if (!ofs->config.upperdir && ofs->config.nfs_export &&
-                  ofs->config.redirect_follow) {
-               pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
-               ofs->config.nfs_export = false;
        }
 
        stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL);
@@ -1842,21 +1875,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (!ovl_upper_mnt(ofs))
                sb->s_flags |= SB_RDONLY;
 
-       if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
-               /* index dir will act also as workdir */
-               dput(ofs->workdir);
-               ofs->workdir = NULL;
-               iput(ofs->workdir_trap);
-               ofs->workdir_trap = NULL;
-
+       if (!ovl_force_readonly(ofs) && ofs->config.index) {
                err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
                if (err)
                        goto out_free_oe;
 
                /* Force r/o mount with no index dir */
-               if (ofs->indexdir)
-                       ofs->workdir = dget(ofs->indexdir);
-               else
+               if (!ofs->indexdir)
                        sb->s_flags |= SB_RDONLY;
        }
 
index 42c5128..6c1166c 100644 (file)
@@ -566,8 +566,9 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                goto out;
 
        /* don't even try if the size is too large */
-       if (count > KMALLOC_MAX_SIZE)
-               return -ENOMEM;
+       error = -ENOMEM;
+       if (count >= KMALLOC_MAX_SIZE)
+               goto out;
 
        if (write) {
                kbuf = memdup_user_nul(ubuf, count);
@@ -576,7 +577,6 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                        goto out;
                }
        } else {
-               error = -ENOMEM;
                kbuf = kzalloc(count, GFP_KERNEL);
                if (!kbuf)
                        goto out;
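
The proc hunk above switches the size check from an early return to setting `error` and jumping to the common `out:` label, so the function's single exit path always runs. A hedged sketch of that idiom with invented names:

#include <linux/slab.h>

static ssize_t example_handler(size_t count)
{
	ssize_t error;
	void *kbuf = NULL;

	error = -ENOMEM;
	if (count >= KMALLOC_MAX_SIZE)
		goto out;		/* too large: fall through to cleanup */

	kbuf = kzalloc(count, GFP_KERNEL);
	if (!kbuf)
		goto out;		/* same error code reused */

	error = 0;			/* ... do the real work here ... */
out:
	kfree(kbuf);			/* kfree(NULL) is a no-op */
	return error;
}
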
index bbfa9b1..4fb7978 100644 (file)
@@ -419,28 +419,42 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
        return ret;
 }
 
-ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
-                  loff_t *pos)
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
+       mm_segment_t old_fs = get_fs();
+       ssize_t ret;
+
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_CAN_READ))
+               return -EINVAL;
+
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       set_fs(KERNEL_DS);
        if (file->f_op->read)
-               return file->f_op->read(file, buf, count, pos);
+               ret = file->f_op->read(file, (void __user *)buf, count, pos);
        else if (file->f_op->read_iter)
-               return new_sync_read(file, buf, count, pos);
+               ret = new_sync_read(file, (void __user *)buf, count, pos);
        else
-               return -EINVAL;
+               ret = -EINVAL;
+       set_fs(old_fs);
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
+       return ret;
 }
 
 ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t result;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       result = vfs_read(file, (void __user *)buf, count, pos);
-       set_fs(old_fs);
-       return result;
+       ret = rw_verify_area(READ, file, pos, count);
+       if (ret)
+               return ret;
+       return __kernel_read(file, buf, count, pos);
 }
 EXPORT_SYMBOL(kernel_read);
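
With the rework above, kernel_read() performs rw_verify_area() itself and delegates to __kernel_read(), which carries the FMODE checks, the KERNEL_DS window and the fsnotify/rchar accounting. A hedged sketch of a kernel-internal caller; the helper name and its purpose are made up, only the kernel_read() signature comes from the hunk.

#include <linux/fs.h>

/* Hypothetical helper: read the first bytes of an already-opened file into a
 * kernel buffer using the reworked kernel_read() above. */
static ssize_t example_read_head(struct file *filp, void *buf, size_t len)
{
	loff_t pos = 0;

	/* permission/area checks and accounting now happen inside */
	return kernel_read(filp, buf, len, &pos);
}
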
 
@@ -456,17 +470,22 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
                return -EFAULT;
 
        ret = rw_verify_area(READ, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               ret = __vfs_read(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_access(file);
-                       add_rchar(current, ret);
-               }
-               inc_syscr(current);
-       }
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
 
+       if (file->f_op->read)
+               ret = file->f_op->read(file, buf, count, pos);
+       else if (file->f_op->read_iter)
+               ret = new_sync_read(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
        return ret;
 }
 
@@ -488,23 +507,15 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
        return ret;
 }
 
-static ssize_t __vfs_write(struct file *file, const char __user *p,
-                          size_t count, loff_t *pos)
-{
-       if (file->f_op->write)
-               return file->f_op->write(file, p, count, pos);
-       else if (file->f_op->write_iter)
-               return new_sync_write(file, p, count, pos);
-       else
-               return -EINVAL;
-}
-
+/* caller is responsible for file_start_write/file_end_write */
 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
 {
        mm_segment_t old_fs;
        const char __user *p;
        ssize_t ret;
 
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
        if (!(file->f_mode & FMODE_CAN_WRITE))
                return -EINVAL;
 
@@ -513,7 +524,12 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        p = (__force const char __user *)buf;
        if (count > MAX_RW_COUNT)
                count =  MAX_RW_COUNT;
-       ret = __vfs_write(file, p, count, pos);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, p, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, p, count, pos);
+       else
+               ret = -EINVAL;
        set_fs(old_fs);
        if (ret > 0) {
                fsnotify_modify(file);
@@ -522,21 +538,20 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        inc_syscw(current);
        return ret;
 }
-EXPORT_SYMBOL(__kernel_write);
 
 ssize_t kernel_write(struct file *file, const void *buf, size_t count,
                            loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t res;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_write(file, (__force const char __user *)buf, count, pos);
-       set_fs(old_fs);
+       ret = rw_verify_area(WRITE, file, pos, count);
+       if (ret)
+               return ret;
 
-       return res;
+       file_start_write(file);
+       ret =  __kernel_write(file, buf, count, pos);
+       file_end_write(file);
+       return ret;
 }
 EXPORT_SYMBOL(kernel_write);
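
The comment added above makes the split explicit: __kernel_write() no longer takes the write-freeze protection itself (and its EXPORT_SYMBOL is gone), while kernel_write() wraps it with rw_verify_area() plus file_start_write()/file_end_write(). A hedged sketch of a core-kernel user of the low-level variant doing its own bracketing; the multi-chunk scenario and names are invented.

#include <linux/fs.h>

static ssize_t example_write_two(struct file *filp, const void *a, size_t la,
				 const void *b, size_t lb, loff_t *pos)
{
	ssize_t ret;

	/* per the comment above, the caller brackets __kernel_write() itself */
	file_start_write(filp);
	ret = __kernel_write(filp, a, la, pos);
	if (ret >= 0)
		ret = __kernel_write(filp, b, lb, pos);
	file_end_write(filp);

	return ret;
}
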
 
@@ -552,19 +567,23 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
                return -EFAULT;
 
        ret = rw_verify_area(WRITE, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               file_start_write(file);
-               ret = __vfs_write(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_modify(file);
-                       add_wchar(current, ret);
-               }
-               inc_syscw(current);
-               file_end_write(file);
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       file_start_write(file);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, buf, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_modify(file);
+               add_wchar(current, ret);
        }
-
+       inc_syscw(current);
+       file_end_write(file);
        return ret;
 }
 
index b43f0e8..9ed9036 100644 (file)
@@ -671,7 +671,8 @@ xlog_cil_push_work(
        /*
         * Wake up any background push waiters now this context is being pushed.
         */
-       wake_up_all(&ctx->push_wait);
+       if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+               wake_up_all(&cil->xc_push_wait);
 
        /*
         * Check if we've anything to push. If there is nothing, then we don't
@@ -743,13 +744,12 @@ xlog_cil_push_work(
 
        /*
         * initialise the new context and attach it to the CIL. Then attach
-        * the current context to the CIL committing lsit so it can be found
+        * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
-       init_waitqueue_head(&new_ctx->push_wait);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
@@ -937,7 +937,7 @@ xlog_cil_push_background(
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
-               xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
+               xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }
 
@@ -1216,12 +1216,12 @@ xlog_cil_init(
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
+       init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);
 
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
-       init_waitqueue_head(&ctx->push_wait);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
index ec22c7a..75a6287 100644 (file)
@@ -240,7 +240,6 @@ struct xfs_cil_ctx {
        struct xfs_log_vec      *lv_chain;      /* logvecs being pushed */
        struct list_head        iclog_entry;
        struct list_head        committing;     /* ctx committing list */
-       wait_queue_head_t       push_wait;      /* background push throttle */
        struct work_struct      discard_endio_work;
 };
 
@@ -274,6 +273,7 @@ struct xfs_cil {
        wait_queue_head_t       xc_commit_wait;
        xfs_lsn_t               xc_current_sequence;
        struct work_struct      xc_push_work;
+       wait_queue_head_t       xc_push_wait;   /* background push throttle */
 } ____cacheline_aligned_in_smp;
 
 /*
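
The xfs hunks above move the background-push throttle waitqueue from the per-checkpoint context into the long-lived struct xfs_cil, and only wake waiters when the finished context had actually crossed the blocking limit. A hedged, generic sketch of that throttle shape using stock wait-queue primitives; all names are invented.

#include <linux/wait.h>

struct example_throttle {
	wait_queue_head_t	push_wait;	/* lives in the long-lived object */
	unsigned long		space_used;
	unsigned long		blocking_limit;
};

static void example_throttle_init(struct example_throttle *t, unsigned long limit)
{
	init_waitqueue_head(&t->push_wait);
	t->space_used = 0;
	t->blocking_limit = limit;
}

/* writer side: block while usage is over the limit */
static void example_throttle_wait(struct example_throttle *t)
{
	wait_event(t->push_wait, READ_ONCE(t->space_used) < t->blocking_limit);
}

/* pusher side: after flushing, wake waiters only if any could be blocked */
static void example_push_done(struct example_throttle *t, unsigned long used)
{
	WRITE_ONCE(t->space_used, 0);
	if (used >= t->blocking_limit)
		wake_up_all(&t->push_wait);
}
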
index 907fa5d..4a674db 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef _ASM_GENERIC_CACHEFLUSH_H
 #define _ASM_GENERIC_CACHEFLUSH_H
 
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
index 9439ff0..5698fca 100644 (file)
@@ -27,7 +27,7 @@
 #include <asm/smp.h>
 
 DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
-#define __mmiowb_state()       this_cpu_ptr(&__mmiowb_state)
+#define __mmiowb_state()       raw_cpu_ptr(&__mmiowb_state)
 #else
 #define __mmiowb_state()       arch_mmiowb_state()
 #endif /* arch_mmiowb_state */
@@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
 static inline void mmiowb_set_pending(void)
 {
        struct mmiowb_state *ms = __mmiowb_state();
-       ms->mmiowb_pending = ms->nesting_count;
+
+       if (likely(ms->nesting_count))
+               ms->mmiowb_pending = ms->nesting_count;
 }
 
 static inline void mmiowb_spin_lock(void)
index 56527c8..088c1de 100644 (file)
@@ -29,8 +29,8 @@ struct alg_sock {
 
        struct sock *parent;
 
-       unsigned int refcnt;
-       unsigned int nokey_refcnt;
+       atomic_t refcnt;
+       atomic_t nokey_refcnt;
 
        const struct af_alg_type *type;
        void *private;
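
The hunk above converts the af_alg socket reference counters from plain unsigned int to atomic_t, so they no longer depend on an outer lock for correctness. A hedged sketch of the matching get/put shape with the kernel's atomic API; the helper names are invented, the field name follows the hunk.

#include <linux/atomic.h>
#include <crypto/if_alg.h>

static void example_alg_get(struct alg_sock *ask)
{
	atomic_inc(&ask->refcnt);
}

static bool example_alg_put(struct alg_sock *ask)
{
	/* returns true when the last reference went away */
	return atomic_dec_and_test(&ask->refcnt);
}
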
index 421a30f..4efec30 100644 (file)
@@ -968,6 +968,48 @@ struct drm_connector_helper_funcs {
         */
        enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
                                           struct drm_display_mode *mode);
+
+       /**
+        * @mode_valid_ctx:
+        *
+        * Callback to validate a mode for a connector, irrespective of the
+        * specific display configuration.
+        *
+        * This callback is used by the probe helpers to filter the mode list
+        * (which is usually derived from the EDID data block from the sink).
+        * See e.g. drm_helper_probe_single_connector_modes().
+        *
+        * This function is optional, and is the atomic version of
+        * &drm_connector_helper_funcs.mode_valid.
+        *
+        * To allow for accessing the atomic state of modesetting objects, the
+        * helper libraries always call this with ctx set to a valid context,
+        * and &drm_mode_config.connection_mutex will always be locked with
+        * the ctx parameter set to @ctx. This allows for taking additional
+        * locks as required.
+        *
+        * Even though additional locks may be acquired, this callback is
+        * still expected not to take any constraints into account which would
+        * be influenced by the currently set display state - such constraints
+        * should be handled in the driver's atomic check. For example, if a
+        * connector shares display bandwidth with other connectors then it
+        * would be ok to validate the minimum bandwidth requirement of a mode
+        * against the maximum possible bandwidth of the connector. But it
+        * wouldn't be ok to take the current bandwidth usage of other
+        * connectors into account, as this would change depending on the
+        * display state.
+        *
+        * Returns:
+        * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote
+        * the &enum drm_mode_status value to @status, or a negative error
+        * code otherwise.
+        *
+        */
+       int (*mode_valid_ctx)(struct drm_connector *connector,
+                             struct drm_display_mode *mode,
+                             struct drm_modeset_acquire_ctx *ctx,
+                             enum drm_mode_status *status);
+
        /**
         * @best_encoder:
         *
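
A hedged sketch of a driver implementing the new callback according to the contract documented above: validate only against fixed, state-independent limits, write the verdict through @status and return 0 (or a negative error, for instance when a ctx-aware lock acquisition fails). The connector name and the clock limit are made up.

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Hypothetical driver callback; 300000 kHz is an invented fixed limit. */
static int example_conn_mode_valid_ctx(struct drm_connector *connector,
				       struct drm_display_mode *mode,
				       struct drm_modeset_acquire_ctx *ctx,
				       enum drm_mode_status *status)
{
	if (mode->clock > 300000)
		*status = MODE_CLOCK_HIGH;
	else
		*status = MODE_OK;

	return 0;
}

static const struct drm_connector_helper_funcs example_conn_helper_funcs = {
	.mode_valid_ctx = example_conn_mode_valid_ctx,
};
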
index bc989de..96e408b 100644 (file)
        INTEL_VGA_DEVICE(0x4551, info), \
        INTEL_VGA_DEVICE(0x4541, info), \
        INTEL_VGA_DEVICE(0x4E71, info), \
+       INTEL_VGA_DEVICE(0x4557, info), \
+       INTEL_VGA_DEVICE(0x4555, info), \
        INTEL_VGA_DEVICE(0x4E61, info), \
+       INTEL_VGA_DEVICE(0x4E57, info), \
+       INTEL_VGA_DEVICE(0x4E55, info), \
        INTEL_VGA_DEVICE(0x4E51, info)
 
 /* TGL */
        INTEL_VGA_DEVICE(0x4C90, info), \
        INTEL_VGA_DEVICE(0x4C9A, info)
 
+/* DG1 */
+#define INTEL_DG1_IDS(info) \
+       INTEL_VGA_DEVICE(0x4905, info)
+
 #endif /* _I915_PCIIDS_H */
index b1c705a..a9e13b2 100644 (file)
@@ -692,7 +692,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 int ttm_bo_swapout(struct ttm_bo_global *glob,
                        struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+void ttm_bo_swapout_all(void);
 
 /**
  * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
index 71b195e..5a37f1c 100644 (file)
@@ -526,17 +526,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
  */
 
 /**
- * ttm_mem_reg_is_pci
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @mem: A valid struct ttm_mem_reg.
- *
- * Returns true if the memory described by @mem is PCI memory,
- * false otherwise.
- */
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
-
-/**
  * ttm_bo_mem_space
  *
  * @bo: Pointer to a struct ttm_buffer_object. the data of which
index c0e928a..5e2393f 100644 (file)
@@ -70,7 +70,7 @@ struct ttm_backend_func {
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
-       int (*unbind) (struct ttm_tt *ttm);
+       void (*unbind) (struct ttm_tt *ttm);
 
        /**
         * struct ttm_backend_func member destroy
index 2c4927b..fd525c7 100644 (file)
@@ -77,6 +77,9 @@
 
 #endif /* cmpxchg64_relaxed */
 
+#define arch_atomic_read atomic_read
+#define arch_atomic_read_acquire atomic_read_acquire
+
 #ifndef atomic_read_acquire
 static __always_inline int
 atomic_read_acquire(const atomic_t *v)
@@ -86,6 +89,9 @@ atomic_read_acquire(const atomic_t *v)
 #define atomic_read_acquire atomic_read_acquire
 #endif
 
+#define arch_atomic_set atomic_set
+#define arch_atomic_set_release atomic_set_release
+
 #ifndef atomic_set_release
 static __always_inline void
 atomic_set_release(atomic_t *v, int i)
@@ -95,6 +101,13 @@ atomic_set_release(atomic_t *v, int i)
 #define atomic_set_release atomic_set_release
 #endif
 
+#define arch_atomic_add atomic_add
+
+#define arch_atomic_add_return atomic_add_return
+#define arch_atomic_add_return_acquire atomic_add_return_acquire
+#define arch_atomic_add_return_release atomic_add_return_release
+#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
+
 #ifndef atomic_add_return_relaxed
 #define atomic_add_return_acquire atomic_add_return
 #define atomic_add_return_release atomic_add_return
@@ -137,6 +150,11 @@ atomic_add_return(int i, atomic_t *v)
 
 #endif /* atomic_add_return_relaxed */
 
+#define arch_atomic_fetch_add atomic_fetch_add
+#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release atomic_fetch_add_release
+#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+
 #ifndef atomic_fetch_add_relaxed
 #define atomic_fetch_add_acquire atomic_fetch_add
 #define atomic_fetch_add_release atomic_fetch_add
@@ -179,6 +197,13 @@ atomic_fetch_add(int i, atomic_t *v)
 
 #endif /* atomic_fetch_add_relaxed */
 
+#define arch_atomic_sub atomic_sub
+
+#define arch_atomic_sub_return atomic_sub_return
+#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
+#define arch_atomic_sub_return_release atomic_sub_return_release
+#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
+
 #ifndef atomic_sub_return_relaxed
 #define atomic_sub_return_acquire atomic_sub_return
 #define atomic_sub_return_release atomic_sub_return
@@ -221,6 +246,11 @@ atomic_sub_return(int i, atomic_t *v)
 
 #endif /* atomic_sub_return_relaxed */
 
+#define arch_atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
+#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
 #ifndef atomic_fetch_sub_relaxed
 #define atomic_fetch_sub_acquire atomic_fetch_sub
 #define atomic_fetch_sub_release atomic_fetch_sub
@@ -263,6 +293,8 @@ atomic_fetch_sub(int i, atomic_t *v)
 
 #endif /* atomic_fetch_sub_relaxed */
 
+#define arch_atomic_inc atomic_inc
+
 #ifndef atomic_inc
 static __always_inline void
 atomic_inc(atomic_t *v)
@@ -272,6 +304,11 @@ atomic_inc(atomic_t *v)
 #define atomic_inc atomic_inc
 #endif
 
+#define arch_atomic_inc_return atomic_inc_return
+#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
+#define arch_atomic_inc_return_release atomic_inc_return_release
+#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
+
 #ifndef atomic_inc_return_relaxed
 #ifdef atomic_inc_return
 #define atomic_inc_return_acquire atomic_inc_return
@@ -353,6 +390,11 @@ atomic_inc_return(atomic_t *v)
 
 #endif /* atomic_inc_return_relaxed */
 
+#define arch_atomic_fetch_inc atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
+#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+
 #ifndef atomic_fetch_inc_relaxed
 #ifdef atomic_fetch_inc
 #define atomic_fetch_inc_acquire atomic_fetch_inc
@@ -434,6 +476,8 @@ atomic_fetch_inc(atomic_t *v)
 
 #endif /* atomic_fetch_inc_relaxed */
 
+#define arch_atomic_dec atomic_dec
+
 #ifndef atomic_dec
 static __always_inline void
 atomic_dec(atomic_t *v)
@@ -443,6 +487,11 @@ atomic_dec(atomic_t *v)
 #define atomic_dec atomic_dec
 #endif
 
+#define arch_atomic_dec_return atomic_dec_return
+#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
+#define arch_atomic_dec_return_release atomic_dec_return_release
+#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
+
 #ifndef atomic_dec_return_relaxed
 #ifdef atomic_dec_return
 #define atomic_dec_return_acquire atomic_dec_return
@@ -524,6 +573,11 @@ atomic_dec_return(atomic_t *v)
 
 #endif /* atomic_dec_return_relaxed */
 
+#define arch_atomic_fetch_dec atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
+#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+
 #ifndef atomic_fetch_dec_relaxed
 #ifdef atomic_fetch_dec
 #define atomic_fetch_dec_acquire atomic_fetch_dec
@@ -605,6 +659,13 @@ atomic_fetch_dec(atomic_t *v)
 
 #endif /* atomic_fetch_dec_relaxed */
 
+#define arch_atomic_and atomic_and
+
+#define arch_atomic_fetch_and atomic_fetch_and
+#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release atomic_fetch_and_release
+#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+
 #ifndef atomic_fetch_and_relaxed
 #define atomic_fetch_and_acquire atomic_fetch_and
 #define atomic_fetch_and_release atomic_fetch_and
@@ -647,6 +708,8 @@ atomic_fetch_and(int i, atomic_t *v)
 
 #endif /* atomic_fetch_and_relaxed */
 
+#define arch_atomic_andnot atomic_andnot
+
 #ifndef atomic_andnot
 static __always_inline void
 atomic_andnot(int i, atomic_t *v)
@@ -656,6 +719,11 @@ atomic_andnot(int i, atomic_t *v)
 #define atomic_andnot atomic_andnot
 #endif
 
+#define arch_atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+
 #ifndef atomic_fetch_andnot_relaxed
 #ifdef atomic_fetch_andnot
 #define atomic_fetch_andnot_acquire atomic_fetch_andnot
@@ -737,6 +805,13 @@ atomic_fetch_andnot(int i, atomic_t *v)
 
 #endif /* atomic_fetch_andnot_relaxed */
 
+#define arch_atomic_or atomic_or
+
+#define arch_atomic_fetch_or atomic_fetch_or
+#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release atomic_fetch_or_release
+#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+
 #ifndef atomic_fetch_or_relaxed
 #define atomic_fetch_or_acquire atomic_fetch_or
 #define atomic_fetch_or_release atomic_fetch_or
@@ -779,6 +854,13 @@ atomic_fetch_or(int i, atomic_t *v)
 
 #endif /* atomic_fetch_or_relaxed */
 
+#define arch_atomic_xor atomic_xor
+
+#define arch_atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
+#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
 #ifndef atomic_fetch_xor_relaxed
 #define atomic_fetch_xor_acquire atomic_fetch_xor
 #define atomic_fetch_xor_release atomic_fetch_xor
@@ -821,6 +903,11 @@ atomic_fetch_xor(int i, atomic_t *v)
 
 #endif /* atomic_fetch_xor_relaxed */
 
+#define arch_atomic_xchg atomic_xchg
+#define arch_atomic_xchg_acquire atomic_xchg_acquire
+#define arch_atomic_xchg_release atomic_xchg_release
+#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
+
 #ifndef atomic_xchg_relaxed
 #define atomic_xchg_acquire atomic_xchg
 #define atomic_xchg_release atomic_xchg
@@ -863,6 +950,11 @@ atomic_xchg(atomic_t *v, int i)
 
 #endif /* atomic_xchg_relaxed */
 
+#define arch_atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
+#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+
 #ifndef atomic_cmpxchg_relaxed
 #define atomic_cmpxchg_acquire atomic_cmpxchg
 #define atomic_cmpxchg_release atomic_cmpxchg
@@ -905,6 +997,11 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #endif /* atomic_cmpxchg_relaxed */
 
+#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+
 #ifndef atomic_try_cmpxchg_relaxed
 #ifdef atomic_try_cmpxchg
 #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
@@ -1002,6 +1099,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 
 #endif /* atomic_try_cmpxchg_relaxed */
 
+#define arch_atomic_sub_and_test atomic_sub_and_test
+
 #ifndef atomic_sub_and_test
 /**
  * atomic_sub_and_test - subtract value from variable and test result
@@ -1020,6 +1119,8 @@ atomic_sub_and_test(int i, atomic_t *v)
 #define atomic_sub_and_test atomic_sub_and_test
 #endif
 
+#define arch_atomic_dec_and_test atomic_dec_and_test
+
 #ifndef atomic_dec_and_test
 /**
  * atomic_dec_and_test - decrement and test
@@ -1037,6 +1138,8 @@ atomic_dec_and_test(atomic_t *v)
 #define atomic_dec_and_test atomic_dec_and_test
 #endif
 
+#define arch_atomic_inc_and_test atomic_inc_and_test
+
 #ifndef atomic_inc_and_test
 /**
  * atomic_inc_and_test - increment and test
@@ -1054,6 +1157,8 @@ atomic_inc_and_test(atomic_t *v)
 #define atomic_inc_and_test atomic_inc_and_test
 #endif
 
+#define arch_atomic_add_negative atomic_add_negative
+
 #ifndef atomic_add_negative
 /**
  * atomic_add_negative - add and test if negative
@@ -1072,6 +1177,8 @@ atomic_add_negative(int i, atomic_t *v)
 #define atomic_add_negative atomic_add_negative
 #endif
 
+#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
+
 #ifndef atomic_fetch_add_unless
 /**
  * atomic_fetch_add_unless - add unless the number is already a given value
@@ -1097,6 +1204,8 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 #endif
 
+#define arch_atomic_add_unless atomic_add_unless
+
 #ifndef atomic_add_unless
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -1115,6 +1224,8 @@ atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_add_unless atomic_add_unless
 #endif
 
+#define arch_atomic_inc_not_zero atomic_inc_not_zero
+
 #ifndef atomic_inc_not_zero
 /**
  * atomic_inc_not_zero - increment unless the number is zero
@@ -1131,6 +1242,8 @@ atomic_inc_not_zero(atomic_t *v)
 #define atomic_inc_not_zero atomic_inc_not_zero
 #endif
 
+#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
+
 #ifndef atomic_inc_unless_negative
 static __always_inline bool
 atomic_inc_unless_negative(atomic_t *v)
@@ -1147,6 +1260,8 @@ atomic_inc_unless_negative(atomic_t *v)
 #define atomic_inc_unless_negative atomic_inc_unless_negative
 #endif
 
+#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
+
 #ifndef atomic_dec_unless_positive
 static __always_inline bool
 atomic_dec_unless_positive(atomic_t *v)
@@ -1163,6 +1278,8 @@ atomic_dec_unless_positive(atomic_t *v)
 #define atomic_dec_unless_positive atomic_dec_unless_positive
 #endif
 
+#define arch_atomic_dec_if_positive atomic_dec_if_positive
+
 #ifndef atomic_dec_if_positive
 static __always_inline int
 atomic_dec_if_positive(atomic_t *v)
@@ -1184,6 +1301,9 @@ atomic_dec_if_positive(atomic_t *v)
 #include <asm-generic/atomic64.h>
 #endif
 
+#define arch_atomic64_read atomic64_read
+#define arch_atomic64_read_acquire atomic64_read_acquire
+
 #ifndef atomic64_read_acquire
 static __always_inline s64
 atomic64_read_acquire(const atomic64_t *v)
@@ -1193,6 +1313,9 @@ atomic64_read_acquire(const atomic64_t *v)
 #define atomic64_read_acquire atomic64_read_acquire
 #endif
 
+#define arch_atomic64_set atomic64_set
+#define arch_atomic64_set_release atomic64_set_release
+
 #ifndef atomic64_set_release
 static __always_inline void
 atomic64_set_release(atomic64_t *v, s64 i)
@@ -1202,6 +1325,13 @@ atomic64_set_release(atomic64_t *v, s64 i)
 #define atomic64_set_release atomic64_set_release
 #endif
 
+#define arch_atomic64_add atomic64_add
+
+#define arch_atomic64_add_return atomic64_add_return
+#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
+#define arch_atomic64_add_return_release atomic64_add_return_release
+#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
+
 #ifndef atomic64_add_return_relaxed
 #define atomic64_add_return_acquire atomic64_add_return
 #define atomic64_add_return_release atomic64_add_return
@@ -1244,6 +1374,11 @@ atomic64_add_return(s64 i, atomic64_t *v)
 
 #endif /* atomic64_add_return_relaxed */
 
+#define arch_atomic64_fetch_add atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
+#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+
 #ifndef atomic64_fetch_add_relaxed
 #define atomic64_fetch_add_acquire atomic64_fetch_add
 #define atomic64_fetch_add_release atomic64_fetch_add
@@ -1286,6 +1421,13 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_add_relaxed */
 
+#define arch_atomic64_sub atomic64_sub
+
+#define arch_atomic64_sub_return atomic64_sub_return
+#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release atomic64_sub_return_release
+#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
 #ifndef atomic64_sub_return_relaxed
 #define atomic64_sub_return_acquire atomic64_sub_return
 #define atomic64_sub_return_release atomic64_sub_return
@@ -1328,6 +1470,11 @@ atomic64_sub_return(s64 i, atomic64_t *v)
 
 #endif /* atomic64_sub_return_relaxed */
 
+#define arch_atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
 #ifndef atomic64_fetch_sub_relaxed
 #define atomic64_fetch_sub_acquire atomic64_fetch_sub
 #define atomic64_fetch_sub_release atomic64_fetch_sub
@@ -1370,6 +1517,8 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_sub_relaxed */
 
+#define arch_atomic64_inc atomic64_inc
+
 #ifndef atomic64_inc
 static __always_inline void
 atomic64_inc(atomic64_t *v)
@@ -1379,6 +1528,11 @@ atomic64_inc(atomic64_t *v)
 #define atomic64_inc atomic64_inc
 #endif
 
+#define arch_atomic64_inc_return atomic64_inc_return
+#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
+#define arch_atomic64_inc_return_release atomic64_inc_return_release
+#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+
 #ifndef atomic64_inc_return_relaxed
 #ifdef atomic64_inc_return
 #define atomic64_inc_return_acquire atomic64_inc_return
@@ -1460,6 +1614,11 @@ atomic64_inc_return(atomic64_t *v)
 
 #endif /* atomic64_inc_return_relaxed */
 
+#define arch_atomic64_fetch_inc atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
+#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+
 #ifndef atomic64_fetch_inc_relaxed
 #ifdef atomic64_fetch_inc
 #define atomic64_fetch_inc_acquire atomic64_fetch_inc
@@ -1541,6 +1700,8 @@ atomic64_fetch_inc(atomic64_t *v)
 
 #endif /* atomic64_fetch_inc_relaxed */
 
+#define arch_atomic64_dec atomic64_dec
+
 #ifndef atomic64_dec
 static __always_inline void
 atomic64_dec(atomic64_t *v)
@@ -1550,6 +1711,11 @@ atomic64_dec(atomic64_t *v)
 #define atomic64_dec atomic64_dec
 #endif
 
+#define arch_atomic64_dec_return atomic64_dec_return
+#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
+#define arch_atomic64_dec_return_release atomic64_dec_return_release
+#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+
 #ifndef atomic64_dec_return_relaxed
 #ifdef atomic64_dec_return
 #define atomic64_dec_return_acquire atomic64_dec_return
@@ -1631,6 +1797,11 @@ atomic64_dec_return(atomic64_t *v)
 
 #endif /* atomic64_dec_return_relaxed */
 
+#define arch_atomic64_fetch_dec atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
+#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+
 #ifndef atomic64_fetch_dec_relaxed
 #ifdef atomic64_fetch_dec
 #define atomic64_fetch_dec_acquire atomic64_fetch_dec
@@ -1712,6 +1883,13 @@ atomic64_fetch_dec(atomic64_t *v)
 
 #endif /* atomic64_fetch_dec_relaxed */
 
+#define arch_atomic64_and atomic64_and
+
+#define arch_atomic64_fetch_and atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
+#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+
 #ifndef atomic64_fetch_and_relaxed
 #define atomic64_fetch_and_acquire atomic64_fetch_and
 #define atomic64_fetch_and_release atomic64_fetch_and
@@ -1754,6 +1932,8 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_and_relaxed */
 
+#define arch_atomic64_andnot atomic64_andnot
+
 #ifndef atomic64_andnot
 static __always_inline void
 atomic64_andnot(s64 i, atomic64_t *v)
@@ -1763,6 +1943,11 @@ atomic64_andnot(s64 i, atomic64_t *v)
 #define atomic64_andnot atomic64_andnot
 #endif
 
+#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+
 #ifndef atomic64_fetch_andnot_relaxed
 #ifdef atomic64_fetch_andnot
 #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
@@ -1844,6 +2029,13 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_andnot_relaxed */
 
+#define arch_atomic64_or atomic64_or
+
+#define arch_atomic64_fetch_or atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
+#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+
 #ifndef atomic64_fetch_or_relaxed
 #define atomic64_fetch_or_acquire atomic64_fetch_or
 #define atomic64_fetch_or_release atomic64_fetch_or
@@ -1886,6 +2078,13 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_or_relaxed */
 
+#define arch_atomic64_xor atomic64_xor
+
+#define arch_atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
 #ifndef atomic64_fetch_xor_relaxed
 #define atomic64_fetch_xor_acquire atomic64_fetch_xor
 #define atomic64_fetch_xor_release atomic64_fetch_xor
@@ -1928,6 +2127,11 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_xor_relaxed */
 
+#define arch_atomic64_xchg atomic64_xchg
+#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
+#define arch_atomic64_xchg_release atomic64_xchg_release
+#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
+
 #ifndef atomic64_xchg_relaxed
 #define atomic64_xchg_acquire atomic64_xchg
 #define atomic64_xchg_release atomic64_xchg
@@ -1970,6 +2174,11 @@ atomic64_xchg(atomic64_t *v, s64 i)
 
 #endif /* atomic64_xchg_relaxed */
 
+#define arch_atomic64_cmpxchg atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
+#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+
 #ifndef atomic64_cmpxchg_relaxed
 #define atomic64_cmpxchg_acquire atomic64_cmpxchg
 #define atomic64_cmpxchg_release atomic64_cmpxchg
@@ -2012,6 +2221,11 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 
 #endif /* atomic64_cmpxchg_relaxed */
 
+#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+
 #ifndef atomic64_try_cmpxchg_relaxed
 #ifdef atomic64_try_cmpxchg
 #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
@@ -2109,6 +2323,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 
 #endif /* atomic64_try_cmpxchg_relaxed */
 
+#define arch_atomic64_sub_and_test atomic64_sub_and_test
+
 #ifndef atomic64_sub_and_test
 /**
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -2127,6 +2343,8 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
 #define atomic64_sub_and_test atomic64_sub_and_test
 #endif
 
+#define arch_atomic64_dec_and_test atomic64_dec_and_test
+
 #ifndef atomic64_dec_and_test
 /**
  * atomic64_dec_and_test - decrement and test
@@ -2144,6 +2362,8 @@ atomic64_dec_and_test(atomic64_t *v)
 #define atomic64_dec_and_test atomic64_dec_and_test
 #endif
 
+#define arch_atomic64_inc_and_test atomic64_inc_and_test
+
 #ifndef atomic64_inc_and_test
 /**
  * atomic64_inc_and_test - increment and test
@@ -2161,6 +2381,8 @@ atomic64_inc_and_test(atomic64_t *v)
 #define atomic64_inc_and_test atomic64_inc_and_test
 #endif
 
+#define arch_atomic64_add_negative atomic64_add_negative
+
 #ifndef atomic64_add_negative
 /**
  * atomic64_add_negative - add and test if negative
@@ -2179,6 +2401,8 @@ atomic64_add_negative(s64 i, atomic64_t *v)
 #define atomic64_add_negative atomic64_add_negative
 #endif
 
+#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
+
 #ifndef atomic64_fetch_add_unless
 /**
  * atomic64_fetch_add_unless - add unless the number is already a given value
@@ -2204,6 +2428,8 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 #endif
 
+#define arch_atomic64_add_unless atomic64_add_unless
+
 #ifndef atomic64_add_unless
 /**
  * atomic64_add_unless - add unless the number is already a given value
@@ -2222,6 +2448,8 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 #define atomic64_add_unless atomic64_add_unless
 #endif
 
+#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
+
 #ifndef atomic64_inc_not_zero
 /**
  * atomic64_inc_not_zero - increment unless the number is zero
@@ -2238,6 +2466,8 @@ atomic64_inc_not_zero(atomic64_t *v)
 #define atomic64_inc_not_zero atomic64_inc_not_zero
 #endif
 
+#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
+
 #ifndef atomic64_inc_unless_negative
 static __always_inline bool
 atomic64_inc_unless_negative(atomic64_t *v)
@@ -2254,6 +2484,8 @@ atomic64_inc_unless_negative(atomic64_t *v)
 #define atomic64_inc_unless_negative atomic64_inc_unless_negative
 #endif
 
+#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
+
 #ifndef atomic64_dec_unless_positive
 static __always_inline bool
 atomic64_dec_unless_positive(atomic64_t *v)
@@ -2270,6 +2502,8 @@ atomic64_dec_unless_positive(atomic64_t *v)
 #define atomic64_dec_unless_positive atomic64_dec_unless_positive
 #endif
 
+#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
+
 #ifndef atomic64_dec_if_positive
 static __always_inline s64
 atomic64_dec_if_positive(atomic64_t *v)
@@ -2288,4 +2522,4 @@ atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
+// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f
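
The long run of additions above gives every atomic_* fallback an arch_atomic_* alias, so code that needs the arch_ spelling (for example instrumentation wrappers layered on top) still resolves to the plain operations on kernels using these generic fallbacks. A hedged one-liner illustrating what the alias buys; the function name is invented.

#include <linux/atomic.h>

/* Hypothetical low-level counter bump: with the aliases above, this is
 * literally atomic_inc() when the generic fallback header is in use. */
static inline void example_raw_count_inc(atomic_t *counter)
{
	arch_atomic_inc(counter);
}
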
index 4671fbf..7f475d5 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
index 8fd9009..5724141 100644 (file)
@@ -590,6 +590,7 @@ struct request_queue {
        u64                     write_hints[BLK_MAX_WRITE_HINTS];
 };
 
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
 #define QUEUE_FLAG_STOPPED     0       /* queue is stopped */
 #define QUEUE_FLAG_DYING       1       /* queue being torn down */
 #define QUEUE_FLAG_NOMERGES     3      /* disable merge attempts */
index 4052d64..47d5b0c 100644 (file)
@@ -33,7 +33,7 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr);
 int netns_bpf_prog_attach(const union bpf_attr *attr,
                          struct bpf_prog *prog);
-int netns_bpf_prog_detach(const union bpf_attr *attr);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int netns_bpf_link_create(const union bpf_attr *attr,
                          struct bpf_prog *prog);
 #else
@@ -49,7 +49,8 @@ static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
        return -EOPNOTSUPP;
 }
 
-static inline int netns_bpf_prog_detach(const union bpf_attr *attr)
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+                                       enum bpf_prog_type ptype)
 {
        return -EOPNOTSUPP;
 }
index 07052d4..9750a19 100644 (file)
@@ -1543,13 +1543,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
 #if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+                        struct bpf_prog *old, u32 which);
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 void sock_map_unhash(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
 static inline int sock_map_prog_update(struct bpf_map *map,
-                                      struct bpf_prog *prog, u32 which)
+                                      struct bpf_prog *prog,
+                                      struct bpf_prog *old, u32 which)
 {
        return -EOPNOTSUPP;
 }
@@ -1559,6 +1562,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
 {
        return -EINVAL;
 }
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+                                      enum bpf_prog_type ptype)
+{
+       return -EOPNOTSUPP;
+}
 #endif /* CONFIG_BPF_STREAM_PARSER */
 
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
index 5c1ea99..8b81fbb 100644 (file)
@@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t)
        return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
 }
 
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+       return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
 static inline bool btf_type_is_enum(const struct btf_type *t)
 {
        return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
index 5266115..fee0b55 100644 (file)
@@ -790,7 +790,9 @@ struct sock_cgroup_data {
        union {
 #ifdef __LITTLE_ENDIAN
                struct {
-                       u8      is_data;
+                       u8      is_data : 1;
+                       u8      no_refcnt : 1;
+                       u8      unused : 6;
                        u8      padding;
                        u16     prioidx;
                        u32     classid;
@@ -800,7 +802,9 @@ struct sock_cgroup_data {
                        u32     classid;
                        u16     prioidx;
                        u8      padding;
-                       u8      is_data;
+                       u8      unused : 6;
+                       u8      no_refcnt : 1;
+                       u8      is_data : 1;
                } __packed;
 #endif
                u64             val;
index 4598e4d..618838c 100644 (file)
@@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock;
 
 void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
         */
        v = READ_ONCE(skcd->val);
 
-       if (v & 1)
+       if (v & 3)
                return &cgrp_dfl_root.cgrp;
 
        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
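
Why the test above widens from (v & 1) to (v & 3): with the bitfields added in cgroup-defs.h, is_data stays at bit 0 and the new no_refcnt flag sits at bit 1 of the pointer-sized value, and either one means skcd->val is not holding a cgroup pointer. A hedged restatement as code; the helper name is invented.

#include <linux/types.h>

static inline bool example_skcd_tagged(u64 v)
{
	return (v & 3) != 0;	/* is_data (bit 0) or no_refcnt (bit 1) set */
}
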
@@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 #else  /* CONFIG_CGROUP_DATA */
 
 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 
 #endif /* CONFIG_CGROUP_DATA */
index ee37256..5e55302 100644 (file)
 #define __no_sanitize_thread
 #endif
 
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+               __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
 /*
  * Not all versions of clang implement the the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
index 7dd4e03..0b1dc61 100644 (file)
@@ -11,7 +11,7 @@
                     + __GNUC_PATCHLEVEL__)
 
 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40800
+#if GCC_VERSION < 40900
 # error Sorry, your compiler is too old - please upgrade it.
 #endif
 
 #define __no_sanitize_thread
 #endif
 
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
index 30827f8..204e768 100644 (file)
@@ -123,7 +123,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #ifdef CONFIG_DEBUG_ENTRY
 /* Begin/end of an instrumentation safe region */
 #define instrumentation_begin() ({                                     \
-       asm volatile("%c0:\n\t"                                         \
+       asm volatile("%c0: nop\n\t"                                             \
                     ".pushsection .discard.instr_begin\n\t"            \
                     ".long %c0b - .\n\t"                               \
                     ".popsection\n\t" : : "i" (__COUNTER__));          \
index cdf0165..c8f03d2 100644 (file)
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__             1
 # define __GCC4_has_attribute___nonstring__           0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
 # define __GCC4_has_attribute___fallthrough__         0
 #endif
 
index e368384..01dd58c 100644 (file)
@@ -118,10 +118,6 @@ struct ftrace_likely_data {
 #define notrace                        __attribute__((__no_instrument_function__))
 #endif
 
-/* Section for code which can't be instrumented at all */
-#define noinstr                                                                \
-       noinline notrace __attribute((__section__(".noinstr.text")))
-
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
@@ -193,16 +189,18 @@ struct ftrace_likely_data {
 
 #define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
 #endif
 
 #ifndef __no_sanitize_or_inline
 #define __no_sanitize_or_inline __always_inline
 #endif
 
+/* Section for code which can't be instrumented at all */
+#define noinstr                                                                \
+       noinline notrace __attribute((__section__(".noinstr.text")))    \
+       __no_kcsan __no_sanitize_address
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
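
With the redefinition above, noinstr now also carries __no_kcsan and __no_sanitize_address, so a single annotation keeps tracing, KCSAN and KASAN instrumentation out of the function and places it in .noinstr.text. A hedged usage sketch; the function is invented.

#include <linux/compiler_types.h>

static noinstr void example_early_entry_step(void)
{
	/* only noinstr-safe code is allowed in here */
}
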
@@ -254,32 +252,8 @@ struct ftrace_likely_data {
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *                            non-scalar types unchanged.
  */
-#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
 /*
- * We build this out of a couple of helper macros in a vain attempt to
- * help you keep your lunch down while reading it.
- */
-#define __pick_scalar_type(x, type, otherwise)                                 \
-       __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
-
-/*
- * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
- * so we include the naked type here as well as the signed/unsigned variants.
- */
-#define __pick_integer_type(x, type, otherwise)                                        \
-       __pick_scalar_type(x, type,                                             \
-               __pick_scalar_type(x, unsigned type,                            \
-                       __pick_scalar_type(x, signed type, otherwise)))
-
-#define __unqual_scalar_typeof(x) typeof(                                      \
-       __pick_integer_type(x, char,                                            \
-               __pick_integer_type(x, short,                                   \
-                       __pick_integer_type(x, int,                             \
-                               __pick_integer_type(x, long,                    \
-                                       __pick_integer_type(x, long long, x))))))
-#else
-/*
- * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
  * is not type-compatible with 'signed char', and we define a separate case.
  */
 #define __scalar_type_to_expr_cases(type)                              \
@@ -295,7 +269,6 @@ struct ftrace_likely_data {
                         __scalar_type_to_expr_cases(long),             \
                         __scalar_type_to_expr_cases(long long),        \
                         default: (x)))
-#endif
 
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \
index 15460a5..5efed86 100644 (file)
@@ -433,7 +433,8 @@ enum dl_dev_state {
  * @suppliers: List of links to supplier devices.
  * @consumers: List of links to consumer devices.
  * @needs_suppliers: Hook to global list of devices waiting for suppliers.
- * @defer_sync: Hook to global list of devices that have deferred sync_state.
+ * @defer_hook: Hook to global list of devices that have deferred sync_state or
+ *             deferred fw_devlink.
  * @need_for_probe: If needs_suppliers is on a list, this indicates if the
  *                 suppliers are needed for probe or not.
  * @status: Driver status information.
@@ -442,7 +443,7 @@ struct dev_links_info {
        struct list_head suppliers;
        struct list_head consumers;
        struct list_head needs_suppliers;
-       struct list_head defer_sync;
+       struct list_head defer_hook;
        bool need_for_probe;
        enum dl_dev_state status;
 };
index ab0c156..a2ca294 100644 (file)
@@ -311,6 +311,7 @@ struct dma_buf {
        void *vmap_ptr;
        const char *exp_name;
        const char *name;
+       spinlock_t name_lock; /* spinlock to protect name access */
        struct module *owner;
        struct list_head list_node;
        void *priv;
index 136f984..ab2e20c 100644 (file)
@@ -69,6 +69,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 u64 dma_direct_get_required_mask(struct device *dev);
 gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                                  u64 *phys_mask);
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
@@ -77,8 +78,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-               gfp_t gfp, unsigned long attrs);
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
@@ -87,4 +86,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
index 78f677c..a33ed39 100644 (file)
@@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
        return 0;
 }
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return false;
+}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
        return 0;
index 2c6495f..bb35f33 100644 (file)
@@ -350,6 +350,7 @@ void efi_native_runtime_setup(void);
  * associated with ConOut
  */
 #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID   EFI_GUID(0xe03fc20a, 0x85dc, 0x406e,  0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID     EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989,  0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
 #define LINUX_EFI_LOADER_ENTRY_GUID            EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf,  0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
 #define LINUX_EFI_RANDOM_SEED_TABLE_GUID       EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2,  0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
 #define LINUX_EFI_TPM_EVENT_LOG_GUID           EFI_GUID(0xb7799cb0, 0xeca2, 0x4943,  0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
@@ -1236,14 +1237,11 @@ struct linux_efi_memreserve {
        struct {
                phys_addr_t     base;
                phys_addr_t     size;
-       } entry[0];
+       } entry[];
 };
 
-#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
-       (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
-
 #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
-       / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+       / sizeof_field(struct linux_efi_memreserve, entry[0]))
 
 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
 
index 2593777..0b01447 100644 (file)
@@ -884,12 +884,12 @@ void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_helper_changes_pkt_data(void *func);
 
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
 {
        /* Reconstruction of call-sites is dependent on kallsyms,
         * thus make dump the same restriction.
         */
-       return kallsyms_show_value() == 1;
+       return kallsyms_show_value(cred);
 }
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
index 3f881a8..f5abba8 100644 (file)
@@ -315,6 +315,7 @@ enum rw_hint {
 #define IOCB_SYNC              (1 << 5)
 #define IOCB_WRITE             (1 << 6)
 #define IOCB_NOWAIT            (1 << 7)
+#define IOCB_NOIO              (1 << 9)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -1917,7 +1918,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              struct iovec *fast_pointer,
                              struct iovec **ret_pointer);
 
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -3033,6 +3033,7 @@ extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, lo
 extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
                                    enum kernel_read_file_id);
 extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
 extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
 extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
 extern struct file * open_exec(const char *);
index 5f24fcb..37e1e8f 100644 (file)
@@ -109,6 +109,7 @@ struct fs_context {
        enum fs_context_phase   phase:8;        /* The phase the context is in */
        bool                    need_free:1;    /* Need to call ops->free() */
        bool                    global:1;       /* Goes into &init_user_ns */
+       bool                    oldapi:1;       /* Coming from mount(2) */
 };
 
 struct fs_context_operations {
index c230b4e..20c885d 100644 (file)
@@ -48,6 +48,9 @@ struct host1x_client_ops {
  * @channel: host1x channel associated with this client
  * @syncpts: array of syncpoints requested for this client
  * @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
  */
 struct host1x_client {
        struct list_head list;
@@ -325,10 +328,12 @@ int host1x_client_resume(struct host1x_client *client);
 
 struct tegra_mipi_device;
 
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+                                            struct device_node *np);
 void tegra_mipi_free(struct tegra_mipi_device *device);
 int tegra_mipi_enable(struct tegra_mipi_device *device);
 int tegra_mipi_disable(struct tegra_mipi_device *device);
 int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+int tegra_mipi_wait(struct tegra_mipi_device *device);
 
 #endif
index fe15f83..9f73249 100644 (file)
@@ -3333,13 +3333,17 @@ struct ieee80211_multiple_bssid_configuration {
 #define WLAN_AKM_SUITE_TDLS                    SUITE(0x000FAC, 7)
 #define WLAN_AKM_SUITE_SAE                     SUITE(0x000FAC, 8)
 #define WLAN_AKM_SUITE_FT_OVER_SAE             SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY             SUITE(0x000FAC, 10)
 #define WLAN_AKM_SUITE_8021X_SUITE_B           SUITE(0x000FAC, 11)
 #define WLAN_AKM_SUITE_8021X_SUITE_B_192       SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384         SUITE(0x000FAC, 13)
 #define WLAN_AKM_SUITE_FILS_SHA256             SUITE(0x000FAC, 14)
 #define WLAN_AKM_SUITE_FILS_SHA384             SUITE(0x000FAC, 15)
 #define WLAN_AKM_SUITE_FT_FILS_SHA256          SUITE(0x000FAC, 16)
 #define WLAN_AKM_SUITE_FT_FILS_SHA384          SUITE(0x000FAC, 17)
 #define WLAN_AKM_SUITE_OWE                     SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384           SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384              SUITE(0x000FAC, 20)
 
 #define WLAN_MAX_KEY_LEN               32
 
index b05e855..41a5183 100644 (file)
@@ -25,6 +25,8 @@
 #define VLAN_ETH_DATA_LEN      1500    /* Max. octets in payload        */
 #define VLAN_ETH_FRAME_LEN     1518    /* Max. octets in frame sans FCS */
 
+#define VLAN_MAX_DEPTH 8               /* Max. number of nested VLAN tags parsed */
+
 /*
  *     struct vlan_hdr - vlan header
  *     @h_vlan_TCI: priority and VLAN ID
@@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
                                         int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
+       unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
 
        /* if type is 802.1Q/AD then the header should already be
         * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
                        vlan_depth = ETH_HLEN;
                }
                do {
-                       struct vlan_hdr *vh;
+                       struct vlan_hdr vhdr, *vh;
 
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
+                       vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+                       if (unlikely(!vh || !--parse_depth))
                                return 0;
 
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                        type = vh->h_vlan_encapsulated_proto;
                        vlan_depth += VLAN_HLEN;
                } while (eth_type_vlan(type));
@@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 {
        return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+       if (!skip_vlan)
+               /* VLAN acceleration strips the VLAN header from the skb and
+                * moves it to skb->vlan_proto
+                */
+               return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+       return vlan_get_protocol(skb);
+}
+
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
                                        struct vlan_hdr *vhdr)
 {
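
The new skb_protocol() helper above centralises the VLAN-aware protocol lookup that callers previously open-coded, and __vlan_get_protocol() now bounds its header walk at VLAN_MAX_DEPTH nested tags. A hedged kernel-context sketch of the intended call pattern (my_classify() is hypothetical; later hunks in this merge convert inet_ecn.h to the same style):

    #include <linux/if_ether.h>
    #include <linux/if_vlan.h>

    /* hypothetical caller: resolve the L3 protocol even for VLAN-tagged skbs */
    static int my_classify(const struct sk_buff *skb)
    {
            switch (skb_protocol(skb, true)) {
            case htons(ETH_P_IP):
                    return 4;
            case htons(ETH_P_IPV6):
                    return 6;
            default:
                    return 0;
            }
    }
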
index 1ecb6b4..520858d 100644 (file)
@@ -67,8 +67,15 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN062B", 0 },
        { "ELAN062C", 0 },
        { "ELAN062D", 0 },
+       { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
+       { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
        { "ELAN0631", 0 },
        { "ELAN0632", 0 },
+       { "ELAN0633", 0 }, /* Lenovo S145 */
+       { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
+       { "ELAN0635", 0 }, /* Lenovo V1415-IIL */
+       { "ELAN0636", 0 }, /* Lenovo V1415-Dali */
+       { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
        { "ELAN1000", 0 },
        { }
 };
index 4100bd2..3e8fa1c 100644 (file)
@@ -41,6 +41,7 @@
 #define DMA_PTE_SNP            BIT_ULL(11)
 
 #define DMA_FL_PTE_PRESENT     BIT_ULL(0)
+#define DMA_FL_PTE_US          BIT_ULL(2)
 #define DMA_FL_PTE_XD          BIT_ULL(63)
 
 #define ADDR_WIDTH_5LEVEL      (57)
index 2735da5..3082378 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
 
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 
 /*
  * An entry can be in one of four states:
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
 
-/* flags share CSD_FLAG_ space */
-
-#define IRQ_WORK_PENDING       BIT(0)
-#define IRQ_WORK_BUSY          BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY          BIT(2)
-/* Run hard IRQ context, even on RT */
-#define IRQ_WORK_HARD_IRQ      BIT(3)
-
-#define IRQ_WORK_CLAIMED       (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
-/*
- * structure shares layout with single_call_data_t.
- */
 struct irq_work {
-       struct llist_node llnode;
-       atomic_t flags;
+       union {
+               struct __call_single_node node;
+               struct {
+                       struct llist_node llnode;
+                       atomic_t flags;
+               };
+       };
        void (*func)(struct irq_work *);
 };
 
index 98338dc..481273f 100644 (file)
@@ -18,6 +18,7 @@
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
+struct cred;
 struct module;
 
 static inline int is_kernel_inittext(unsigned long addr)
@@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
 
 /* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
 
 #else /* !CONFIG_KALLSYMS */
 
@@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
        return -ERANGE;
 }
 
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
 {
        return false;
 }
index c62d764..477b8b7 100644 (file)
@@ -177,6 +177,17 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
                           struct pt_regs *regs);
 
 /**
+ *     kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML
+ *                                  packets.
+ *     @remcom_in_buffer: The buffer of the packet we have read.
+ *     @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+                          char *remcom_out_buffer);
+
+/**
  *     kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
  *     @ignored: This parameter is only here to match the prototype.
  *
@@ -276,8 +287,7 @@ struct kgdb_arch {
  * the I/O driver.
  * @post_exception: Pointer to a function that will do any cleanup work
  * for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
  */
 struct kgdb_io {
        const char              *name;
@@ -288,7 +298,7 @@ struct kgdb_io {
        void                    (*deinit) (void);
        void                    (*pre_exception) (void);
        void                    (*post_exception) (void);
-       int                     is_console;
+       struct console          *cons;
 };
 
 extern const struct kgdb_arch          arch_kgdb_ops;
@@ -315,6 +325,7 @@ extern int kgdb_hex2mem(char *buf, char *mem, int count);
 
 extern int kgdb_isremovedbreak(unsigned long addr);
 extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
 
 extern int
 kgdb_handle_exception(int ex_vector, int signo, int err_code,
index e7e5256..77ccf04 100644 (file)
@@ -1095,7 +1095,7 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
 #define ATA_SCSI_COMPAT_IOCTL /* empty */
 #endif
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-#if IS_ENABLED(CONFIG_ATA)
+#if IS_REACHABLE(CONFIG_ATA)
 bool ata_scsi_dma_need_drain(struct request *rq);
 #else
 #define ata_scsi_dma_need_drain NULL
index 6791813..af998f9 100644 (file)
@@ -150,7 +150,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
         size_t buffer_size)
 LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
 LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
 LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
         struct kernfs_node *kn)
 LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -360,7 +360,7 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
         unsigned long flags)
 LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
 LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
-        unsigned perm)
+        enum key_need_perm need_perm)
 LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
 #endif /* CONFIG_KEYS */
 
index 13c0e45..1e6ca71 100644 (file)
@@ -147,6 +147,7 @@ enum {
        MLX5_REG_MCDA            = 0x9063,
        MLX5_REG_MCAM            = 0x907f,
        MLX5_REG_MIRC            = 0x9162,
+       MLX5_REG_SBCAM           = 0xB01F,
        MLX5_REG_RESOURCE_DUMP   = 0xC000,
 };
 
index 116bd9b..073b79e 100644 (file)
@@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
@@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_init2init_qp_in_bits {
@@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
@@ -9958,6 +9960,34 @@ struct mlx5_ifc_pptb_reg_bits {
        u8         untagged_buff[0x4];
 };
 
+struct mlx5_ifc_sbcam_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         feature_group[0x8];
+       u8         reserved_at_10[0x8];
+       u8         access_reg_group[0x8];
+
+       u8         reserved_at_20[0x20];
+
+       u8         sb_access_reg_cap_mask[4][0x20];
+
+       u8         reserved_at_c0[0x80];
+
+       u8         sb_feature_cap_mask[4][0x20];
+
+       u8         reserved_at_1c0[0x40];
+
+       u8         cap_total_buffer_size[0x20];
+
+       u8         cap_cell_size[0x10];
+       u8         cap_max_pg_buffers[0x8];
+       u8         cap_num_pool_supported[0x8];
+
+       u8         reserved_at_240[0x8];
+       u8         cap_sbsr_stat_size[0x8];
+       u8         cap_max_tclass_data[0x8];
+       u8         cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
 struct mlx5_ifc_pbmc_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
index c4c37fd..f6f8849 100644 (file)
@@ -257,8 +257,8 @@ struct lruvec {
         */
        unsigned long                   anon_cost;
        unsigned long                   file_cost;
-       /* Evictions & activations on the inactive file list */
-       atomic_long_t                   inactive_age;
+       /* Non-resident age, driven by LRU movement */
+       atomic_long_t                   nonresident_age;
        /* Refaults at the time of last reclaim cycle */
        unsigned long                   refaults;
        /* Various lruvec state flags (enum lruvec_flags) */
index 8d764aa..e14cbe4 100644 (file)
@@ -318,7 +318,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_LED_MAX                0x0f
 #define INPUT_DEVICE_ID_SND_MAX                0x07
 #define INPUT_DEVICE_ID_FF_MAX         0x7f
-#define INPUT_DEVICE_ID_SW_MAX         0x0f
+#define INPUT_DEVICE_ID_SW_MAX         0x10
 #define INPUT_DEVICE_ID_PROP_MAX       0x1f
 
 #define INPUT_DEVICE_ID_MATCH_BUS      1
index 6fc613e..39e28e1 100644 (file)
@@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void)
        return this_cpu_read(softnet_data.xmit.recursion);
 }
 
-#define XMIT_RECURSION_LIMIT   10
+#define XMIT_RECURSION_LIMIT   8
 static inline bool dev_xmit_recursion(void)
 {
        return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
index b394bd4..c4676d6 100644 (file)
 int ipt_register_table(struct net *net, const struct xt_table *table,
                       const struct ipt_replace *repl,
                       const struct nf_hook_ops *ops, struct xt_table **res);
+
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                      const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops);
 
index 8225f78..1547d5f 100644 (file)
@@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
                        const struct nf_hook_ops *ops, struct xt_table **res);
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops);
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
                                  const struct nf_hook_state *state,
                                  struct xt_table *table);
index c79d833..34c1c4f 100644 (file)
@@ -2169,12 +2169,11 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
  */
 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
 {
-       struct pci_dev *bridge = pci_upstream_bridge(dev);
-
-       while (bridge) {
-               if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
-                       return bridge;
-               bridge = pci_upstream_bridge(bridge);
+       while (dev) {
+               if (pci_is_pcie(dev) &&
+                   pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+                       return dev;
+               dev = pci_upstream_bridge(dev);
        }
 
        return NULL;
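
With the rewritten walk above, pcie_find_root_port() now also returns the device itself when it is a Root Port, instead of only inspecting upstream bridges. A hedged sketch of a caller (report_root_port() is hypothetical):

    /* hypothetical: log the Root Port above (or at) a given device */
    static void report_root_port(struct pci_dev *pdev)
    {
            struct pci_dev *rp = pcie_find_root_port(pdev);

            if (rp)
                    pci_info(pdev, "root port: %s\n", pci_name(rp));
            else
                    pci_info(pdev, "no PCIe root port above this device\n");
    }
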
index 8c05d0f..b693b60 100644 (file)
@@ -1416,6 +1416,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
 void phy_request_interrupt(struct phy_device *phydev);
 void phy_free_interrupt(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
index 733fad7..6d15040 100644 (file)
@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
 
 static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u32 prod = p_chain->u.chain16.prod_idx;
+       u32 cons = p_chain->u.chain16.cons_idx;
        u16 used;
 
-       used = (u16) (((u32)0x10000 +
-                      (u32)p_chain->u.chain16.prod_idx) -
-                     (u32)p_chain->u.chain16.cons_idx);
+       if (prod < cons)
+               prod += (u32)U16_MAX + 1;
+
+       used = (u16)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+               used -= prod / elem_per_page - cons / elem_per_page;
 
        return (u16)(p_chain->capacity - used);
 }
 
 static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u64 prod = p_chain->u.chain32.prod_idx;
+       u64 cons = p_chain->u.chain32.cons_idx;
        u32 used;
 
-       used = (u32) (((u64)0x100000000ULL +
-                      (u64)p_chain->u.chain32.prod_idx) -
-                     (u64)p_chain->u.chain32.cons_idx);
+       if (prod < cons)
+               prod += (u64)U32_MAX + 1;
+
+       used = (u32)(prod - cons);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+               used -= (u32)(prod / elem_per_page - cons / elem_per_page);
 
        return p_chain->capacity - used;
 }
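
The qed_chain changes above make the producer/consumer wraparound explicit: indices are widened, a wrapped producer gets one full index space added back, and the difference is truncated again. A runnable userspace sketch of the 16-bit case:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t used_elems(uint16_t prod_idx, uint16_t cons_idx)
    {
            uint32_t prod = prod_idx, cons = cons_idx;

            if (prod < cons)                       /* producer wrapped past 0xffff */
                    prod += (uint32_t)UINT16_MAX + 1;

            return (uint16_t)(prod - cons);
    }

    int main(void)
    {
            /* 5 elements in flight across the 16-bit wrap boundary */
            printf("%u\n", used_elems(3, 65534));
            return 0;
    }
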
index 4f922af..45cf7b6 100644 (file)
@@ -155,7 +155,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * Loop over each sg element in the given sg_table object.
  */
 #define for_each_sgtable_sg(sgt, sg, i)                \
-       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
 
 /*
  * Loop over each sg element in the given *DMA mapped* sg_table object.
@@ -163,7 +163,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * of the each element.
  */
 #define for_each_sgtable_dma_sg(sgt, sg, i)    \
-       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
 
 /**
  * sg_chain - Chain two sglists together
@@ -451,7 +451,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
  */
 #define for_each_sgtable_page(sgt, piter, pgoffset)    \
-       for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+       for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
 
 /**
  * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
@@ -465,7 +465,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * unit.
  */
 #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)     \
-       for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+       for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
 
 
 /*
index b62e6aa..6833729 100644 (file)
@@ -114,10 +114,6 @@ struct task_group;
 
 #define task_is_stopped_or_traced(task)        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
-#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-                                        (task->flags & PF_FROZEN) == 0 && \
-                                        (task->state & TASK_NOLOAD) == 0)
-
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
 /*
@@ -654,9 +650,8 @@ struct task_struct {
        unsigned int                    ptrace;
 
 #ifdef CONFIG_SMP
-       struct llist_node               wake_entry;
-       unsigned int                    wake_entry_type;
        int                             on_cpu;
+       struct __call_single_node       wake_entry;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int                    cpu;
index fa067de..d2b4204 100644 (file)
@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT    21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT   22      /* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT 23      /* trap for cgroup freezer */
+#define JOBCTL_TASK_WORK_BIT   24      /* set by TWA_SIGNAL */
 
 #define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,9 +29,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING                (1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING       (1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE     (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_TASK_WORK       (1UL << JOBCTL_TASK_WORK_BIT)
 
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
 
 extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
 extern void task_clear_jobctl_trapping(struct task_struct *task);
index 9fd550e..791f484 100644 (file)
@@ -462,10 +462,104 @@ extern void uart_handle_cts_change(struct uart_port *uport,
 extern void uart_insert_char(struct uart_port *port, unsigned int status,
                 unsigned int overrun, unsigned int ch, unsigned int flag);
 
-extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch);
-extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch);
-extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags);
-extern int uart_handle_break(struct uart_port *port);
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+#define SYSRQ_TIMEOUT  (HZ * 5)
+
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       if (!port->sysrq)
+               return 0;
+
+       if (ch && time_before(jiffies, port->sysrq)) {
+               if (sysrq_mask()) {
+                       handle_sysrq(ch);
+                       port->sysrq = 0;
+                       return 1;
+               }
+               if (uart_try_toggle_sysrq(port, ch))
+                       return 1;
+       }
+       port->sysrq = 0;
+
+       return 0;
+}
+
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       if (!port->sysrq)
+               return 0;
+
+       if (ch && time_before(jiffies, port->sysrq)) {
+               if (sysrq_mask()) {
+                       port->sysrq_ch = ch;
+                       port->sysrq = 0;
+                       return 1;
+               }
+               if (uart_try_toggle_sysrq(port, ch))
+                       return 1;
+       }
+       port->sysrq = 0;
+
+       return 0;
+}
+
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+       int sysrq_ch;
+
+       if (!port->has_sysrq) {
+               spin_unlock_irqrestore(&port->lock, irqflags);
+               return;
+       }
+
+       sysrq_ch = port->sysrq_ch;
+       port->sysrq_ch = 0;
+
+       spin_unlock_irqrestore(&port->lock, irqflags);
+
+       if (sysrq_ch)
+               handle_sysrq(sysrq_ch);
+}
+#else  /* CONFIG_MAGIC_SYSRQ_SERIAL */
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       return 0;
+}
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+       return 0;
+}
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+       spin_unlock_irqrestore(&port->lock, irqflags);
+}
+#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+static inline int uart_handle_break(struct uart_port *port)
+{
+       struct uart_state *state = port->state;
+
+       if (port->handle_break)
+               port->handle_break(port);
+
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+       if (port->has_sysrq && uart_console(port)) {
+               if (!port->sysrq) {
+                       port->sysrq = jiffies + SYSRQ_TIMEOUT;
+                       return 1;
+               }
+               port->sysrq = 0;
+       }
+#endif
+       if (port->flags & UPF_SAK)
+               do_SAK(state->port.tty);
+       return 0;
+}
 
 /*
  *     UART_ENABLE_MS - determine if port should enable modem status irqs
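
The SysRq helpers above move from serial_core.c into the header as inlines; the usual driver pattern is to record a candidate SysRq character under port->lock and let uart_unlock_and_check_sysrq() run the handler after dropping the lock. A hedged sketch of that pattern (my_uart_isr(), my_uart_rx_ready() and my_uart_read_char() are hypothetical; a real driver would also push the tty flip buffer):

    static irqreturn_t my_uart_isr(int irq, void *dev_id)
    {
            struct uart_port *port = dev_id;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            while (my_uart_rx_ready(port)) {
                    unsigned int ch = my_uart_read_char(port);

                    if (uart_prepare_sysrq_char(port, ch))
                            continue;
                    uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
            }
            /* drops port->lock, then handles any recorded SysRq character */
            uart_unlock_and_check_sysrq(port, flags);
            return IRQ_HANDLED;
    }
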
index 08674cd..1e9ed84 100644 (file)
@@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
                bpf_prog_put(prog);
 }
 
+static inline int psock_replace_prog(struct bpf_prog **pprog,
+                                    struct bpf_prog *prog,
+                                    struct bpf_prog *old)
+{
+       if (cmpxchg(pprog, old, prog) != old)
+               return -ENOENT;
+
+       if (old)
+               bpf_prog_put(old);
+
+       return 0;
+}
+
 static inline void psock_progs_drop(struct sk_psock_progs *progs)
 {
        psock_set_prog(&progs->msg_parser, NULL);
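
psock_replace_prog() above is a compare-and-swap update: the new program is installed only if the slot still holds the expected old one, and the caller gets -ENOENT otherwise. A runnable userspace sketch of the same pattern using C11 atomics (replace_prog() is a stand-in, not the kernel function):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int replace_prog(_Atomic(void *) *slot, void *new_prog, void *old_prog)
    {
            void *expected = old_prog;

            if (!atomic_compare_exchange_strong(slot, &expected, new_prog))
                    return -ENOENT;        /* someone else changed the slot */
            /* here the kernel code would drop the reference on old_prog */
            return 0;
    }

    int main(void)
    {
            int a, b;
            _Atomic(void *) slot = &a;

            printf("%d\n", replace_prog(&slot, &b, &a));  /* 0: swapped */
            printf("%d\n", replace_prog(&slot, &a, &a));  /* -ENOENT: slot holds &b */
            return 0;
    }
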
index 7ee202a..80d557e 100644 (file)
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 
 typedef void (*smp_call_func_t)(void *info);
 typedef bool (*smp_cond_func_t)(int cpu, void *info);
 
-enum {
-       CSD_FLAG_LOCK           = 0x01,
-
-       /* IRQ_WORK_flags */
-
-       CSD_TYPE_ASYNC          = 0x00,
-       CSD_TYPE_SYNC           = 0x10,
-       CSD_TYPE_IRQ_WORK       = 0x20,
-       CSD_TYPE_TTWU           = 0x30,
-       CSD_FLAG_TYPE_MASK      = 0xF0,
-};
-
 /*
  * structure shares (partial) layout with struct irq_work
  */
 struct __call_single_data {
-       struct llist_node llist;
-       unsigned int flags;
+       union {
+               struct __call_single_node node;
+               struct {
+                       struct llist_node llist;
+                       unsigned int flags;
+               };
+       };
        smp_call_func_t func;
        void *info;
 };
diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h
new file mode 100644 (file)
index 0000000..364b3ae
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SMP_TYPES_H
+#define __LINUX_SMP_TYPES_H
+
+#include <linux/llist.h>
+
+enum {
+       CSD_FLAG_LOCK           = 0x01,
+
+       IRQ_WORK_PENDING        = 0x01,
+       IRQ_WORK_BUSY           = 0x02,
+       IRQ_WORK_LAZY           = 0x04, /* No IPI, wait for tick */
+       IRQ_WORK_HARD_IRQ       = 0x08, /* IRQ context on PREEMPT_RT */
+
+       IRQ_WORK_CLAIMED        = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),
+
+       CSD_TYPE_ASYNC          = 0x00,
+       CSD_TYPE_SYNC           = 0x10,
+       CSD_TYPE_IRQ_WORK       = 0x20,
+       CSD_TYPE_TTWU           = 0x30,
+
+       CSD_FLAG_TYPE_MASK      = 0xF0,
+};
+
+/*
+ * struct __call_single_node is the primary type on
+ * smp.c:call_single_queue.
+ *
+ * flush_smp_call_function_queue() only reads the type from
+ * __call_single_node::u_flags as a regular load, the above
+ * (anonymous) enum defines all the bits of this word.
+ *
+ * Other bits are not modified until the type is known.
+ *
+ * CSD_TYPE_SYNC/ASYNC:
+ *     struct {
+ *             struct llist_node node;
+ *             unsigned int flags;
+ *             smp_call_func_t func;
+ *             void *info;
+ *     };
+ *
+ * CSD_TYPE_IRQ_WORK:
+ *     struct {
+ *             struct llist_node node;
+ *             atomic_t flags;
+ *             void (*func)(struct irq_work *);
+ *     };
+ *
+ * CSD_TYPE_TTWU:
+ *     struct {
+ *             struct llist_node node;
+ *             unsigned int flags;
+ *     };
+ *
+ */
+
+struct __call_single_node {
+       struct llist_node       llist;
+       union {
+               unsigned int    u_flags;
+               atomic_t        a_flags;
+       };
+};
+
+#endif /* __LINUX_SMP_TYPES_H */
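
The point of the new smp_types.h is that struct irq_work and struct __call_single_data now embed the same struct __call_single_node, so entries of either kind can sit on one call_single_queue and be told apart by the CSD_TYPE_* bits. A runnable userspace sketch of the layout-sharing union, using simplified stand-in types:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct llist_node_like { struct llist_node_like *next; };

    struct call_single_node_like {
            struct llist_node_like llist;
            unsigned int u_flags;
    };

    struct irq_work_like {
            union {
                    struct call_single_node_like node;
                    struct {                        /* legacy field names */
                            struct llist_node_like llnode;
                            unsigned int flags;
                    };
            };
            void (*func)(struct irq_work_like *);
    };

    int main(void)
    {
            /* both views of the entry must overlay exactly */
            assert(offsetof(struct irq_work_like, node.llist) ==
                   offsetof(struct irq_work_like, llnode));
            assert(offsetof(struct irq_work_like, node.u_flags) ==
                   offsetof(struct irq_work_like, flags));
            puts("layouts match");
            return 0;
    }
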
index 4c5974b..5b3216b 100644 (file)
@@ -313,6 +313,7 @@ struct vma_swap_readahead {
 };
 
 /* linux/mm/workingset.c */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
 void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
index 7c354c2..b951a87 100644 (file)
@@ -1360,7 +1360,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
 
 extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
 
-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+static inline long ksys_ftruncate(unsigned int fd, loff_t length)
 {
        return do_sys_ftruncate(fd, length, 1);
 }
index bd9a6a9..0fb93aa 100644 (file)
@@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
        twork->func = func;
 }
 
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+#define TWA_RESUME     1
+#define TWA_SIGNAL     2
+int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);
 
index b27e2ff..d5471d6 100644 (file)
@@ -222,9 +222,9 @@ extern bool timekeeping_rtc_skipresume(void);
 
 extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
 
-/*
+/**
  * struct system_time_snapshot - simultaneous raw/real time capture with
- *     counter value
+ *                              counter value
  * @cycles:    Clocksource counter value to produce the system times
  * @real:      Realtime system time
  * @raw:       Monotonic raw system time
@@ -239,9 +239,9 @@ struct system_time_snapshot {
        u8              cs_was_changed_seq;
 };
 
-/*
+/**
  * struct system_device_crosststamp - system/device cross-timestamp
- *     (syncronized capture)
+ *                                   (synchronized capture)
  * @device:            Device time
  * @sys_realtime:      Realtime simultaneous with device time
  * @sys_monoraw:       Monotonic raw simultaneous with device time
@@ -252,12 +252,12 @@ struct system_device_crosststamp {
        ktime_t sys_monoraw;
 };
 
-/*
+/**
  * struct system_counterval_t - system counter value with the pointer to the
- *     corresponding clocksource
+ *                             corresponding clocksource
  * @cycles:    System counter value
  * @cs:                Clocksource corresponding to system counter value. Used by
- *     timekeeping code to verify comparibility of two cycle values
+ *             timekeeping code to verify comparibility of two cycle values
  */
 struct system_counterval_t {
        u64                     cycles;
index 4f8c90c..64356b1 100644 (file)
@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
        u16 digest_size;
 } __packed;
 
+#define TCG_SPECID_SIG "Spec ID Event03"
+
 struct tcg_efi_specid_event_head {
        u8 signature[16];
        u32 platform_class;
@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        int i;
        int j;
        u32 count, event_type;
+       const u8 zero_digest[sizeof(event_header->digest)] = {0};
 
        marker = event;
        marker_start = marker;
@@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        count = READ_ONCE(event->count);
        event_type = READ_ONCE(event->event_type);
 
+       /* Verify that it's the log header */
+       if (event_header->pcr_idx != 0 ||
+           event_header->event_type != NO_ACTION ||
+           memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
+               size = 0;
+               goto out;
+       }
+
        efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
        /* Check if event is malformed. */
-       if (count > efispecid->num_algs) {
+       if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+                  sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
                size = 0;
                goto out;
        }
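
The check above validates both the fixed header fields of the first log event and the "Spec ID Event03" signature; note that sizeof(TCG_SPECID_SIG) is 16, so the comparison includes the terminating NUL and covers the whole signature[16] field. A runnable userspace sketch of just the signature comparison:

    #include <stdio.h>
    #include <string.h>

    #define TCG_SPECID_SIG "Spec ID Event03"

    int main(void)
    {
            /* as laid out in a well-formed TPM2 event log header */
            unsigned char signature[16] = "Spec ID Event03";

            printf("bytes compared: %zu\n", sizeof(TCG_SPECID_SIG));       /* 16 */
            printf("valid header:   %d\n",
                   memcmp(signature, TCG_SPECID_SIG, sizeof(TCG_SPECID_SIG)) == 0);
            return 0;
    }
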
index 48bb681..0221f85 100644 (file)
@@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
index 07adfac..852d8fb 100644 (file)
@@ -400,7 +400,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
                                                     struct sk_buff *skb)
 {
-       struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
+       struct neighbour *n = NULL;
+
+       /* The packets from tunnel devices (eg bareudp) may have only
+        * metadata in the dst pointer of skb. Hence a pointer check of
+        * neigh_lookup is needed.
+        */
+       if (dst->ops->neigh_lookup)
+               n = dst->ops->neigh_lookup(dst, skb, NULL);
+
        return IS_ERR(n) ? NULL : n;
 }
 
index a7eba43..4b6e362 100644 (file)
@@ -372,7 +372,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
 }
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog);
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog);
 #endif /* CONFIG_BPF_SYSCALL */
 
 #endif
index f2c8311..6315324 100644 (file)
@@ -450,6 +450,7 @@ struct flow_block_indr {
        struct net_device               *dev;
        enum flow_block_binder_type     binder_type;
        void                            *data;
+       void                            *cb_priv;
        void                            (*cleanup)(struct flow_block_cb *block_cb);
 };
 
@@ -467,6 +468,13 @@ struct flow_block_cb {
 struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
 
 struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
@@ -488,6 +496,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
        list_move(&block_cb->list, &offload->cb_list);
 }
 
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+                                            struct flow_block_offload *offload)
+{
+       list_del(&block_cb->indr.list);
+       list_move(&block_cb->list, &offload->cb_list);
+}
+
 bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);
 
@@ -532,11 +547,13 @@ static inline void flow_block_init(struct flow_block *flow_block)
 }
 
 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
-                                     enum tc_setup_type type, void *type_data);
+                                     enum tc_setup_type type, void *type_data,
+                                     void *data,
+                                     void (*cleanup)(struct flow_block_cb *block_cb));
 
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb);
+                             void (*release)(void *cb_priv));
 int flow_indr_dev_setup_offload(struct net_device *dev,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
index 7495066..6e5f1e1 100644 (file)
@@ -35,13 +35,6 @@ struct genl_info;
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- *     is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- *     Note that unbind() will not be called symmetrically if the
- *     generic netlink family is removed while there are still open
- *     sockets.
- * @attrbuf: buffer to store parsed attributes (private)
  * @mcgrps: multicast groups used by this family
  * @n_mcgrps: number of multicast groups
  * @mcgrp_offset: starting number of multicast group IDs in this family
@@ -64,9 +57,6 @@ struct genl_family {
        void                    (*post_doit)(const struct genl_ops *ops,
                                             struct sk_buff *skb,
                                             struct genl_info *info);
-       int                     (*mcast_bind)(struct net *net, int group);
-       void                    (*mcast_unbind)(struct net *net, int group);
-       struct nlattr **        attrbuf;        /* private */
        const struct genl_ops * ops;
        const struct genl_multicast_group *mcgrps;
        unsigned int            n_ops;
index 3a6595b..e42402f 100644 (file)
@@ -21,7 +21,7 @@
  * |                                                               |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
  * For a control message, proto/ctype is interpreted as a type of
  * control message. For data messages, proto/ctype is the IP protocol
  * of the next header.
index 0f0d1ef..e1eaf17 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/ip.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 
 #include <net/inet_sock.h>
 #include <net/dsfield.h>
@@ -172,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
 
 static inline int INET_ECN_set_ce(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -191,7 +192,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 
 static inline int INET_ECN_set_ect1(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -272,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, oiph->tos, inner);
 }
@@ -287,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
 }
index 076e5d7..36025de 100644 (file)
@@ -290,6 +290,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p, __u32 fwmark);
 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
 
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
 struct ip_tunnel_encap_ops {
        size_t (*encap_hlen)(struct ip_tunnel_encap *e);
        int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
index a8dce2a..0ca6a1b 100644 (file)
@@ -9,10 +9,13 @@
 #include <linux/bpf-netns.h>
 
 struct bpf_prog;
+struct bpf_prog_array;
 
 struct netns_bpf {
-       struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
-       struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE];
+       /* Array of programs to run compiled from progs or links */
+       struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
 };
 
 #endif /* __NETNS_BPF_H__ */
index 9092e69..ac8c890 100644 (file)
@@ -136,17 +136,6 @@ static inline void qdisc_run(struct Qdisc *q)
        }
 }
 
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
-       /* We need to take extra care in case the skb came via
-        * vlan accelerated path. In that case, use skb->vlan_proto
-        * as the original vlan header was already stripped.
-        */
-       if (skb_vlan_tag_present(skb))
-               return skb->vlan_proto;
-       return skb->protocol;
-}
-
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index 15b4d9a..122d9e2 100644 (file)
@@ -353,11 +353,13 @@ enum {
         ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions.  */
-#define SCTP_ADDR6_ALLOWED     0x00000001      /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED     0x00000001      /* IPv4 address is allowed by
                                                   local sock family */
-#define SCTP_ADDR4_PEERSUPP    0x00000002      /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED     0x00000002      /* IPv6 address is allowed by
+                                                  local sock family */
+#define SCTP_ADDR4_PEERSUPP    0x00000004      /* IPv4 address is supported by
                                                   peer */
-#define SCTP_ADDR6_PEERSUPP    0x00000004      /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP    0x00000008      /* IPv6 address is supported by
                                                   peer */
 
 /* Reasons to retransmit. */
index c53cc42..1183507 100644 (file)
@@ -533,7 +533,8 @@ enum sk_pacing {
  * be copied.
  */
 #define SK_USER_DATA_NOCOPY    1UL
-#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY)
+#define SK_USER_DATA_BPF       2UL     /* Managed by BPF */
+#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
 
 /**
  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -1848,7 +1849,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
-       sk_tx_queue_clear(sk);
        sk->sk_socket = sock;
 }
 
index 094fe68..c7d213c 100644 (file)
@@ -1008,6 +1008,7 @@ struct xfrm_offload {
 #define        XFRM_GRO                32
 #define        XFRM_ESP_NO_TRAILER     64
 #define        XFRM_DEV_RESUME         128
+#define        XFRM_XMIT               256
 
        __u32                   status;
 #define CRYPTO_SUCCESS                         1
index a4ff226..6842990 100644 (file)
@@ -40,7 +40,7 @@ struct xsk_buff_pool {
        u32 headroom;
        u32 chunk_size;
        u32 frame_len;
-       bool cheap_dma;
+       bool dma_need_sync;
        bool unaligned;
        void *addrs;
        struct device *dev;
@@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
 {
-       if (xskb->pool->cheap_dma)
+       if (!xskb->pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_cpu_slow(xskb);
@@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
 static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
 {
-       if (pool->cheap_dma)
+       if (!pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_device_slow(pool, dma, size);
index 6ce8eff..70cbc50 100644 (file)
@@ -66,6 +66,7 @@ struct snd_compr_runtime {
  * @direction: stream direction, playback/recording
  * @metadata_set: metadata set flag, true when set
  * @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
  * @private_data: pointer to DSP private data
  * @dma_buffer: allocated buffer if any
  */
@@ -78,6 +79,7 @@ struct snd_compr_stream {
        enum snd_compr_direction direction;
        bool metadata_set;
        bool next_track;
+       bool partial_drain;
        void *private_data;
        struct snd_dma_buffer dma_buffer;
 };
@@ -182,7 +184,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       /* for partial_drain case we are back to running state on success */
+       if (stream->partial_drain) {
+               stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+               stream->partial_drain = false; /* clear this flag as well */
+       } else {
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       }
 
        wake_up(&stream->runtime->sleep);
 }
index b652206..8c5e381 100644 (file)
@@ -161,4 +161,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
 
 #define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
 
+struct dmaengine_pcm {
+       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+       const struct snd_dmaengine_pcm_config *config;
+       struct snd_soc_component component;
+       unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+       return container_of(p, struct dmaengine_pcm, component);
+}
 #endif
index ef5dd28..2756f9b 100644 (file)
@@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev,
                         const struct snd_soc_component_driver *component_driver,
                         struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+                                                           const char *driver_name);
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name);
 
@@ -1361,6 +1363,10 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
 struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
                                         struct snd_soc_dai_driver *dai_drv,
                                         bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming);
 void snd_soc_unregister_dai(struct snd_soc_dai *dai);
 
 struct snd_soc_dai *snd_soc_find_dai(
index ba9efdc..059b6e4 100644 (file)
@@ -400,7 +400,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_cong_begin_retransmission,     " Retrans") \
        EM(rxrpc_cong_cleared_nacks,            " Cleared") \
        EM(rxrpc_cong_new_low_nack,             " NewLowN") \
-       EM(rxrpc_cong_no_change,                "") \
+       EM(rxrpc_cong_no_change,                " -") \
        EM(rxrpc_cong_progress,                 " Progres") \
        EM(rxrpc_cong_retransmit_again,         " ReTxAgn") \
        EM(rxrpc_cong_rtt_window_end,           " RttWinE") \
index 1968481..8bd3305 100644 (file)
@@ -3168,16 +3168,15 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
@@ -3210,16 +3209,18 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
  *             exactly is queries is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
  * int bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
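
The rewritten helper documentation above spells out the ring buffer flags and return values. A minimal BPF-side sketch (CO-RE style; map name, event layout and attach point are illustrative) that uses bpf_ringbuf_output() with BPF_RB_NO_WAKEUP:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 4096);      /* power-of-2 multiple of PAGE_SIZE */
} events SEC(".maps");

struct event {
        __u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
        struct event e = {
                .pid = bpf_get_current_pid_tgid() >> 32,
        };

        /* returns 0 on success or a negative error; BPF_RB_NO_WAKEUP
         * suppresses the data-availability notification. */
        bpf_ringbuf_output(&events, &e, sizeof(e), BPF_RB_NO_WAKEUP);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";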
index b6aac7e..4c14e8b 100644 (file)
@@ -205,6 +205,7 @@ struct fb_bitfield {
 #define FB_ACTIVATE_ALL               64       /* change all VCs on this fb    */
 #define FB_ACTIVATE_FORCE     128      /* force apply even when no change*/
 #define FB_ACTIVATE_INV_MODE  256       /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT   512       /* for KDSET vt ioctl */
 
 #define FB_ACCELF_TEXT         1       /* (OBSOLETE) see fb_info.flags and vc_mode */
 
index 1f412fb..e103c14 100644 (file)
@@ -110,9 +110,12 @@ struct dsa_hw_desc {
        uint16_t        rsvd1;
        union {
                uint8_t         expected_res;
+               /* create delta record */
                struct {
                        uint64_t        delta_addr;
                        uint32_t        max_delta_size;
+                       uint32_t        delt_rsvd;
+                       uint8_t         expected_res_mask;
                };
                uint32_t        delta_rec_size;
                uint64_t        dest2;
index b6a835d..0c2e27d 100644 (file)
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
 #define SW_PEN_INSERTED                0x0f  /* set = pen inserted */
-#define SW_MAX                 0x0f
+#define SW_MACHINE_COVER       0x10  /* set = cover closed */
+#define SW_MAX                 0x10
 #define SW_CNT                 (SW_MAX+1)
 
 /*
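
SW_MACHINE_COVER is an ordinary EV_SW switch, so its current state can be read from userspace like any other switch bit. A hedged sketch (the device node is illustrative):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        unsigned long sw[(SW_CNT + 8 * sizeof(long) - 1) / (8 * sizeof(long))];
        int fd = open("/dev/input/event0", O_RDONLY);

        if (fd < 0)
                return 1;
        memset(sw, 0, sizeof(sw));
        if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) >= 0)
                printf("cover %s\n",
                       sw[SW_MACHINE_COVER / (8 * sizeof(long))] &
                       (1UL << (SW_MACHINE_COVER % (8 * sizeof(long)))) ?
                       "closed" : "open");
        close(fd);
        return 0;
}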
index 92c2269..7843742 100644 (file)
@@ -197,6 +197,7 @@ struct io_sqring_offsets {
  * sq_ring->flags
  */
 #define IORING_SQ_NEED_WAKEUP  (1U << 0) /* needs io_uring_enter wakeup */
+#define IORING_SQ_CQ_OVERFLOW  (1U << 1) /* CQ ring is overflown */
 
 struct io_cqring_offsets {
        __u32 head;
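
The new IORING_SQ_CQ_OVERFLOW bit in the shared SQ ring flags tells userspace that completions were parked on the kernel-side overflow list and an io_uring_enter() call is needed to flush them into the CQ ring. A minimal check against the mmap'ed flags word, sketched for raw (non-liburing) users:

#include <linux/io_uring.h>

/* sq_flags points at the mmap'ed SQ ring flags word */
static inline int cq_ring_needs_flush(const unsigned *sq_flags)
{
        return *(volatile const unsigned *)sq_flags & IORING_SQ_CQ_OVERFLOW;
}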
index 84f15f4..bee3665 100644 (file)
@@ -36,7 +36,6 @@ enum br_mrp_port_state_type {
 enum br_mrp_port_role_type {
        BR_MRP_PORT_ROLE_PRIMARY,
        BR_MRP_PORT_ROLE_SECONDARY,
-       BR_MRP_PORT_ROLE_NONE,
 };
 
 enum br_mrp_tlv_header_type {
index cba368e..c21edb9 100644 (file)
 
 /* supported values for SO_RDS_TRANSPORT */
 #define        RDS_TRANS_IB    0
-#define        RDS_TRANS_IWARP 1
+#define        RDS_TRANS_GAP   1
 #define        RDS_TRANS_TCP   2
 #define RDS_TRANS_COUNT        3
 #define        RDS_TRANS_NONE  (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
 
 /* IOCTLS commands for SOL_RDS */
 #define SIOCRDSSETTOS          (SIOCPROTOPRIVATE)
index ee0f246..d56427c 100644 (file)
 #define SPI_TX_QUAD            0x200
 #define SPI_RX_DUAL            0x400
 #define SPI_RX_QUAD            0x800
+#define SPI_CS_WORD            0x1000
+#define SPI_TX_OCTAL           0x2000
+#define SPI_RX_OCTAL           0x4000
+#define SPI_3WIRE_HIZ          0x8000
 
 /*---------------------------------------------------------------------------*/
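
These mode bits are also reachable from userspace through the 32-bit spidev mode ioctls. A hedged sketch requesting octal TX (the device path is illustrative, and the controller must actually support the mode):

#include <fcntl.h>
#include <linux/spi/spidev.h>
#include <stdint.h>
#include <sys/ioctl.h>

int request_octal_tx(const char *devnode)
{
        uint32_t mode;
        int fd = open(devnode, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0)
                return -1;
        mode |= SPI_TX_OCTAL;
        if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
                return -1;
        return fd;
}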
 
index 9cec58a..f79d7ab 100644 (file)
@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
 
 
 /* IOCTL to perform a VMM Device request larger then 1KB. */
-#define VBG_IOCTL_VMMDEV_REQUEST_BIG   _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG   _IO('V', 3)
 
 
 /** VBG_IOCTL_HGCM_CONNECT data structure. */
@@ -198,7 +198,7 @@ struct vbg_ioctl_log {
        } u;
 };
 
-#define VBG_IOCTL_LOG(s)               _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+#define VBG_IOCTL_LOG(s)               _IO('V', 9)
 
 
 /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
index eca6692..9204705 100644 (file)
@@ -1030,7 +1030,7 @@ struct vfio_iommu_type1_info_cap_iova_range {
  * size in bytes that can be used by user applications when getting the dirty
  * bitmap.
  */
-#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  1
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
 
 struct vfio_iommu_type1_info_cap_migration {
        struct  vfio_info_cap_header header;
index a46aa8f..0498af5 100644 (file)
@@ -49,13 +49,13 @@ config CLANG_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
index 58c9af1..9a1a98d 100644 (file)
@@ -3746,7 +3746,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                                return false;
 
                        t = btf_type_skip_modifiers(btf, t->type, NULL);
-                       if (!btf_type_is_int(t)) {
+                       if (!btf_type_is_small_int(t)) {
                                bpf_log(log,
                                        "ret type %s not allowed for fmod_ret\n",
                                        btf_kind_str[BTF_INFO_KIND(t->info)]);
@@ -3768,7 +3768,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
        /* skip modifiers */
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
-       if (btf_type_is_int(t) || btf_type_is_enum(t))
+       if (btf_type_is_small_int(t) || btf_type_is_enum(t))
                /* accessing a scalar */
                return true;
        if (!btf_type_is_ptr(t)) {
index 4d76f16..ac53102 100644 (file)
@@ -1276,16 +1276,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
 
 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 {
-       if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
+       if (unlikely(max_optlen < 0))
                return -EINVAL;
 
+       if (unlikely(max_optlen > PAGE_SIZE)) {
+               /* We don't expose optvals that are greater than PAGE_SIZE
+                * to the BPF program.
+                */
+               max_optlen = PAGE_SIZE;
+       }
+
        ctx->optval = kzalloc(max_optlen, GFP_USER);
        if (!ctx->optval)
                return -ENOMEM;
 
        ctx->optval_end = ctx->optval + max_optlen;
 
-       return 0;
+       return max_optlen;
 }
 
 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
@@ -1319,13 +1326,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
         */
        max_optlen = max_t(int, 16, *optlen);
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
 
        ctx.optlen = *optlen;
 
-       if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
+       if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
                ret = -EFAULT;
                goto out;
        }
@@ -1353,8 +1360,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                /* export any potential modifications */
                *level = ctx.level;
                *optname = ctx.optname;
-               *optlen = ctx.optlen;
-               *kernel_optval = ctx.optval;
+
+               /* optlen == 0 from BPF indicates that we should
+                * use original userspace data.
+                */
+               if (ctx.optlen != 0) {
+                       *optlen = ctx.optlen;
+                       *kernel_optval = ctx.optval;
+               }
        }
 
 out:
@@ -1385,12 +1398,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
            __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
                return retval;
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
-
        ctx.optlen = max_optlen;
 
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
+
        if (!retval) {
                /* If kernel getsockopt finished successfully,
                 * copy whatever was returned to the user back
@@ -1404,10 +1417,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                        goto out;
                }
 
-               if (ctx.optlen > max_optlen)
-                       ctx.optlen = max_optlen;
-
-               if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
+               if (copy_from_user(ctx.optval, optval,
+                                  min(ctx.optlen, max_optlen)) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
@@ -1436,10 +1447,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                goto out;
        }
 
-       if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
-           put_user(ctx.optlen, optlen)) {
-               ret = -EFAULT;
-               goto out;
+       if (ctx.optlen != 0) {
+               if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+                   put_user(ctx.optlen, optlen)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        ret = ctx.retval;
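
The hunks above cap the buffer exposed to BPF at PAGE_SIZE and treat ctx.optlen == 0 as "keep the original data" on both the setsockopt and getsockopt paths. A sketch of a cgroup getsockopt program that uses this to opt out of rewriting values it cannot fully see (program name is illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/getsockopt")
int passthrough_getsockopt(struct bpf_sockopt *ctx)
{
        /* BPF only sees up to PAGE_SIZE of the option value; setting
         * optlen to 0 tells the kernel to keep the original data
         * instead of copying our (possibly truncated) buffer back.
         */
        if (ctx->optlen > 4096 /* PAGE_SIZE on most configs */)
                ctx->optlen = 0;

        return 1;       /* allow */
}

char LICENSE[] SEC("license") = "GPL";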
index 0cbb72c..5fdbc77 100644 (file)
@@ -86,12 +86,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
-static struct hlist_head *dev_map_create_hash(unsigned int entries)
+static struct hlist_head *dev_map_create_hash(unsigned int entries,
+                                             int numa_node)
 {
        int i;
        struct hlist_head *hash;
 
-       hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+       hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);
@@ -145,7 +146,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
                return -EINVAL;
 
        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
-               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+                                                          dtab->map.numa_node);
                if (!dtab->dev_index_head)
                        goto free_charge;
 
@@ -232,7 +234,7 @@ static void dev_map_free(struct bpf_map *map)
                        }
                }
 
-               kfree(dtab->dev_index_head);
+               bpf_map_area_free(dtab->dev_index_head);
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;
index 78cf061..310241c 100644 (file)
@@ -19,18 +19,21 @@ struct bpf_netns_link {
         * with netns_bpf_mutex held.
         */
        struct net *net;
+       struct list_head node; /* node in list of links attached to net */
 };
 
 /* Protects updates to netns_bpf */
 DEFINE_MUTEX(netns_bpf_mutex);
 
 /* Must be called with netns_bpf_mutex held. */
-static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link)
+static void netns_bpf_run_array_detach(struct net *net,
+                                      enum netns_bpf_attach_type type)
 {
-       struct bpf_netns_link *net_link =
-               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
 
-       net_link->net = NULL;
+       run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
+                                       lockdep_is_held(&netns_bpf_mutex));
+       bpf_prog_array_free(run_array);
 }
 
 static void bpf_netns_link_release(struct bpf_link *link)
@@ -40,22 +43,18 @@ static void bpf_netns_link_release(struct bpf_link *link)
        enum netns_bpf_attach_type type = net_link->netns_type;
        struct net *net;
 
-       /* Link auto-detached by dying netns. */
-       if (!net_link->net)
-               return;
-
        mutex_lock(&netns_bpf_mutex);
 
-       /* Recheck after potential sleep. We can race with cleanup_net
-        * here, but if we see a non-NULL struct net pointer pre_exit
-        * has not happened yet and will block on netns_bpf_mutex.
+       /* We can race with cleanup_net, but if we see a non-NULL
+        * struct net pointer, pre_exit has not run yet and wait for
+        * netns_bpf_mutex.
         */
        net = net_link->net;
        if (!net)
                goto out_unlock;
 
-       net->bpf.links[type] = NULL;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       netns_bpf_run_array_detach(net, type);
+       list_del(&net_link->node);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -76,6 +75,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
+       struct bpf_prog_array *run_array;
        struct net *net;
        int ret = 0;
 
@@ -93,8 +93,11 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
                goto out_unlock;
        }
 
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       WRITE_ONCE(run_array->items[0].prog, new_prog);
+
        old_prog = xchg(&link->prog, new_prog);
-       rcu_assign_pointer(net->bpf.progs[type], new_prog);
        bpf_prog_put(old_prog);
 
 out_unlock:
@@ -142,14 +145,38 @@ static const struct bpf_link_ops bpf_netns_link_ops = {
        .show_fdinfo = bpf_netns_link_show_fdinfo,
 };
 
+/* Must be called with netns_bpf_mutex held. */
+static int __netns_bpf_prog_query(const union bpf_attr *attr,
+                                 union bpf_attr __user *uattr,
+                                 struct net *net,
+                                 enum netns_bpf_attach_type type)
+{
+       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+       struct bpf_prog_array *run_array;
+       u32 prog_cnt = 0, flags = 0;
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array)
+               prog_cnt = bpf_prog_array_length(run_array);
+
+       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+               return -EFAULT;
+       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+               return -EFAULT;
+       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+               return 0;
+
+       return bpf_prog_array_copy_to_user(run_array, prog_ids,
+                                          attr->query.prog_cnt);
+}
+
 int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr)
 {
-       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
-       u32 prog_id, prog_cnt = 0, flags = 0;
        enum netns_bpf_attach_type type;
-       struct bpf_prog *attached;
        struct net *net;
+       int ret;
 
        if (attr->query.query_flags)
                return -EINVAL;
@@ -162,36 +189,25 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
        if (IS_ERR(net))
                return PTR_ERR(net);
 
-       rcu_read_lock();
-       attached = rcu_dereference(net->bpf.progs[type]);
-       if (attached) {
-               prog_cnt = 1;
-               prog_id = attached->aux->id;
-       }
-       rcu_read_unlock();
+       mutex_lock(&netns_bpf_mutex);
+       ret = __netns_bpf_prog_query(attr, uattr, net, type);
+       mutex_unlock(&netns_bpf_mutex);
 
        put_net(net);
-
-       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
-               return -EFAULT;
-       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
-               return -EFAULT;
-
-       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
-               return 0;
-
-       if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
-               return -EFAULT;
-
-       return 0;
+       return ret;
 }
 
 int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
+       struct bpf_prog_array *run_array;
        enum netns_bpf_attach_type type;
+       struct bpf_prog *attached;
        struct net *net;
        int ret;
 
+       if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
@@ -200,19 +216,47 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        mutex_lock(&netns_bpf_mutex);
 
        /* Attaching prog directly is not compatible with links */
-       if (net->bpf.links[type]) {
+       if (!list_empty(&net->bpf.links[type])) {
                ret = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               ret = flow_dissector_bpf_prog_attach(net, prog);
+               ret = flow_dissector_bpf_prog_attach_check(net, prog);
                break;
        default:
                ret = -EINVAL;
                break;
        }
+       if (ret)
+               goto out_unlock;
+
+       attached = net->bpf.progs[type];
+       if (attached == prog) {
+               /* The same program cannot be attached twice */
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array) {
+               WRITE_ONCE(run_array->items[0].prog, prog);
+       } else {
+               run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+               if (!run_array) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               run_array->items[0].prog = prog;
+               rcu_assign_pointer(net->bpf.run_array[type], run_array);
+       }
+
+       net->bpf.progs[type] = prog;
+       if (attached)
+               bpf_prog_put(attached);
+
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
 
@@ -221,63 +265,74 @@ out_unlock:
 
 /* Must be called with netns_bpf_mutex held. */
 static int __netns_bpf_prog_detach(struct net *net,
-                                  enum netns_bpf_attach_type type)
+                                  enum netns_bpf_attach_type type,
+                                  struct bpf_prog *old)
 {
        struct bpf_prog *attached;
 
        /* Progs attached via links cannot be detached */
-       if (net->bpf.links[type])
+       if (!list_empty(&net->bpf.links[type]))
                return -EINVAL;
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (!attached)
+       attached = net->bpf.progs[type];
+       if (!attached || attached != old)
                return -ENOENT;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       netns_bpf_run_array_detach(net, type);
+       net->bpf.progs[type] = NULL;
        bpf_prog_put(attached);
        return 0;
 }
 
-int netns_bpf_prog_detach(const union bpf_attr *attr)
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
 {
        enum netns_bpf_attach_type type;
+       struct bpf_prog *prog;
        int ret;
 
+       if (attr->target_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
 
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
        mutex_lock(&netns_bpf_mutex);
-       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
+       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
        mutex_unlock(&netns_bpf_mutex);
 
+       bpf_prog_put(prog);
+
        return ret;
 }
 
 static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
                                 enum netns_bpf_attach_type type)
 {
-       struct bpf_prog *prog;
+       struct bpf_netns_link *net_link =
+               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
        int err;
 
        mutex_lock(&netns_bpf_mutex);
 
        /* Allow attaching only one prog or link for now */
-       if (net->bpf.links[type]) {
+       if (!list_empty(&net->bpf.links[type])) {
                err = -E2BIG;
                goto out_unlock;
        }
        /* Links are not compatible with attaching prog directly */
-       prog = rcu_dereference_protected(net->bpf.progs[type],
-                                        lockdep_is_held(&netns_bpf_mutex));
-       if (prog) {
+       if (net->bpf.progs[type]) {
                err = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               err = flow_dissector_bpf_prog_attach(net, link->prog);
+               err = flow_dissector_bpf_prog_attach_check(net, link->prog);
                break;
        default:
                err = -EINVAL;
@@ -286,7 +341,15 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
        if (err)
                goto out_unlock;
 
-       net->bpf.links[type] = link;
+       run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+       if (!run_array) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+       run_array->items[0].prog = link->prog;
+       rcu_assign_pointer(net->bpf.run_array[type], run_array);
+
+       list_add_tail(&net_link->node, &net->bpf.links[type]);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -345,23 +408,34 @@ out_put_net:
        return err;
 }
 
+static int __net_init netns_bpf_pernet_init(struct net *net)
+{
+       int type;
+
+       for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
+               INIT_LIST_HEAD(&net->bpf.links[type]);
+
+       return 0;
+}
+
 static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
 {
        enum netns_bpf_attach_type type;
-       struct bpf_link *link;
+       struct bpf_netns_link *net_link;
 
        mutex_lock(&netns_bpf_mutex);
        for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
-               link = net->bpf.links[type];
-               if (link)
-                       bpf_netns_link_auto_detach(link);
-               else
-                       __netns_bpf_prog_detach(net, type);
+               netns_bpf_run_array_detach(net, type);
+               list_for_each_entry(net_link, &net->bpf.links[type], node)
+                       net_link->net = NULL; /* auto-detach link */
+               if (net->bpf.progs[type])
+                       bpf_prog_put(net->bpf.progs[type]);
        }
        mutex_unlock(&netns_bpf_mutex);
 }
 
 static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
+       .init = netns_bpf_pernet_init,
        .pre_exit = netns_bpf_pernet_pre_exit,
 };
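
After this refactor a network namespace keeps its attached programs in a bpf_prog_array (run_array) and its links in a per-netns list, and BPF_PROG_QUERY reports the run_array contents under netns_bpf_mutex. A hedged userspace sketch (libbpf) querying flow dissector programs attached to the current netns:

#include <bpf/bpf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int query_flow_dissector(void)
{
        __u32 prog_ids[4], prog_cnt = 4, attach_flags = 0;
        int err, netns_fd = open("/proc/self/ns/net", O_RDONLY);

        if (netns_fd < 0)
                return -1;
        err = bpf_prog_query(netns_fd, BPF_FLOW_DISSECTOR, 0, &attach_flags,
                             prog_ids, &prog_cnt);
        close(netns_fd);
        if (err)
                return -1;
        printf("%u flow dissector prog(s) attached\n", prog_cnt);
        return 0;
}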
 
index 21cde24..cae9d50 100644 (file)
@@ -20,11 +20,14 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map)
 /* The caller must hold the reuseport_lock */
 void bpf_sk_reuseport_detach(struct sock *sk)
 {
-       struct sock __rcu **socks;
+       uintptr_t sk_user_data;
 
        write_lock_bh(&sk->sk_callback_lock);
-       socks = sk->sk_user_data;
-       if (socks) {
+       sk_user_data = (uintptr_t)sk->sk_user_data;
+       if (sk_user_data & SK_USER_DATA_BPF) {
+               struct sock __rcu **socks;
+
+               socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
                WRITE_ONCE(sk->sk_user_data, NULL);
                /*
                 * Do not move this NULL assignment outside of
@@ -252,6 +255,7 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        struct sock *free_osk = NULL, *osk, *nsk;
        struct sock_reuseport *reuse;
        u32 index = *(u32 *)key;
+       uintptr_t sk_user_data;
        struct socket *socket;
        int err, fd;
 
@@ -305,7 +309,9 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        if (err)
                goto put_file_unlock;
 
-       WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+       sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
+               SK_USER_DATA_BPF;
+       WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
        rcu_assign_pointer(array->ptrs[index], nsk);
        free_osk = osk;
        err = 0;
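
The update above stops treating sk_user_data as a bare pointer: the low bits now carry SK_USER_DATA_NOCOPY/SK_USER_DATA_BPF flags, and the real slot pointer is recovered with SK_USER_DATA_PTRMASK before use. A standalone illustration of the tagging pattern (the names mirror the kernel flags, but the snippet is only a demonstration, not kernel code):

#include <stddef.h>
#include <stdint.h>

#define EX_NOCOPY       1UL
#define EX_BPF          2UL
#define EX_PTRMASK      (~(EX_NOCOPY | EX_BPF))

static void *tag_ptr(void *p)
{
        /* valid because the pointee is at least 4-byte aligned */
        return (void *)((uintptr_t)p | EX_NOCOPY | EX_BPF);
}

static void *untag_ptr(void *user_data)
{
        uintptr_t v = (uintptr_t)user_data;

        if (!(v & EX_BPF))
                return NULL;    /* not owned by a BPF reuseport map */
        return (void *)(v & EX_PTRMASK);
}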
index 180414b..0af88bb 100644 (file)
@@ -132,15 +132,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 {
        struct bpf_ringbuf *rb;
 
-       if (!data_sz || !PAGE_ALIGNED(data_sz))
-               return ERR_PTR(-EINVAL);
-
-#ifdef CONFIG_64BIT
-       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
-       if (data_sz > RINGBUF_MAX_DATA_SZ)
-               return ERR_PTR(-E2BIG);
-#endif
-
        rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
        if (!rb)
                return ERR_PTR(-ENOMEM);
@@ -166,9 +157,16 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
                return ERR_PTR(-EINVAL);
 
        if (attr->key_size || attr->value_size ||
-           attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries))
+           !is_power_of_2(attr->max_entries) ||
+           !PAGE_ALIGNED(attr->max_entries))
                return ERR_PTR(-EINVAL);
 
+#ifdef CONFIG_64BIT
+       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
+       if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
+               return ERR_PTR(-E2BIG);
+#endif
+
        rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
        if (!rb_map)
                return ERR_PTR(-ENOMEM);
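
With the checks moved into ringbuf_map_alloc(), the map's max_entries (the data area size) must be a page-aligned power of two at creation time, otherwise the syscall fails with -EINVAL or -E2BIG. A hedged userspace sketch using the libbpf map-creation API of this era:

#include <bpf/bpf.h>
#include <linux/bpf.h>

int create_ringbuf_map(void)
{
        /* key/value sizes are unused for ring buffers; 64 KiB satisfies
         * both the power-of-2 and the page-alignment requirement.
         */
        return bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, 64 * 1024, 0);
}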
index 8da1599..0fd80ac 100644 (file)
@@ -2121,7 +2121,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
            !bpf_capable())
                return -EPERM;
 
-       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
+       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (is_perfmon_prog_type(type) && !perfmon_capable())
                return -EPERM;
@@ -2893,13 +2893,11 @@ static int bpf_prog_detach(const union bpf_attr *attr)
        switch (ptype) {
        case BPF_PROG_TYPE_SK_MSG:
        case BPF_PROG_TYPE_SK_SKB:
-               return sock_map_get_from_fd(attr, NULL);
+               return sock_map_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_LIRC_MODE2:
                return lirc_prog_detach(attr);
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               return netns_bpf_prog_detach(attr);
+               return netns_bpf_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_CGROUP_DEVICE:
        case BPF_PROG_TYPE_CGROUP_SKB:
        case BPF_PROG_TYPE_CGROUP_SOCK:
@@ -3139,7 +3137,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
        return NULL;
 }
 
-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+                                             const struct cred *f_cred)
 {
        const struct bpf_map *map;
        struct bpf_insn *insns;
@@ -3165,7 +3164,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
                    code == (BPF_JMP | BPF_CALL_ARGS)) {
                        if (code == (BPF_JMP | BPF_CALL_ARGS))
                                insns[i].code = BPF_JMP | BPF_CALL;
-                       if (!bpf_dump_raw_ok())
+                       if (!bpf_dump_raw_ok(f_cred))
                                insns[i].imm = 0;
                        continue;
                }
@@ -3221,7 +3220,8 @@ static int set_info_rec_size(struct bpf_prog_info *info)
        return 0;
 }
 
-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+                                  struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
 {
@@ -3290,11 +3290,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                struct bpf_insn *insns_sanitized;
                bool fault;
 
-               if (prog->blinded && !bpf_dump_raw_ok()) {
+               if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
                        info.xlated_prog_insns = 0;
                        goto done;
                }
-               insns_sanitized = bpf_insn_prepare_dump(prog);
+               insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
                if (!insns_sanitized)
                        return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
@@ -3328,7 +3328,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        if (info.jited_prog_len && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        uinsns = u64_to_user_ptr(info.jited_prog_insns);
                        ulen = min_t(u32, info.jited_prog_len, ulen);
 
@@ -3363,7 +3363,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_ksyms;
        info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        unsigned long ksym_addr;
                        u64 __user *user_ksyms;
                        u32 i;
@@ -3394,7 +3394,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_func_lens;
        info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        u32 __user *user_lens;
                        u32 func_len, i;
 
@@ -3451,7 +3451,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        else
                info.nr_jited_line_info = 0;
        if (info.nr_jited_line_info && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        __u64 __user *user_linfo;
                        u32 i;
 
@@ -3497,7 +3497,8 @@ done:
        return 0;
 }
 
-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+                                 struct bpf_map *map,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3540,7 +3541,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
        return 0;
 }
 
-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+                                 struct btf *btf,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3555,7 +3557,8 @@ static int bpf_btf_get_info_by_fd(struct btf *btf,
        return btf_get_info_by_fd(btf, attr, uattr);
 }
 
-static int bpf_link_get_info_by_fd(struct bpf_link *link,
+static int bpf_link_get_info_by_fd(struct file *file,
+                                 struct bpf_link *link,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3608,15 +3611,15 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
                return -EBADFD;
 
        if (f.file->f_op == &bpf_prog_fops)
-               err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
                                              uattr);
        else if (f.file->f_op == &bpf_map_fops)
-               err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
                                             uattr);
        else if (f.file->f_op == &btf_fops)
-               err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+               err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
        else if (f.file->f_op == &bpf_link_fops)
-               err = bpf_link_get_info_by_fd(f.file->private_data,
+               err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
                                              attr, uattr);
        else
                err = -EINVAL;
index 34cde84..94cead5 100644 (file)
@@ -399,8 +399,7 @@ static bool reg_type_not_null(enum bpf_reg_type type)
        return type == PTR_TO_SOCKET ||
                type == PTR_TO_TCP_SOCK ||
                type == PTR_TO_MAP_VALUE ||
-               type == PTR_TO_SOCK_COMMON ||
-               type == PTR_TO_BTF_ID;
+               type == PTR_TO_SOCK_COMMON;
 }
 
 static bool reg_type_may_be_null(enum bpf_reg_type type)
@@ -9801,7 +9800,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
        int i, j, subprog_start, subprog_end = 0, len, subprog;
        struct bpf_insn *insn;
        void *old_bpf_func;
-       int err;
+       int err, num_exentries;
 
        if (env->subprog_cnt <= 1)
                return 0;
@@ -9876,6 +9875,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                func[i]->aux->nr_linfo = prog->aux->nr_linfo;
                func[i]->aux->jited_linfo = prog->aux->jited_linfo;
                func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+               num_exentries = 0;
+               insn = func[i]->insnsi;
+               for (j = 0; j < func[i]->len; j++, insn++) {
+                       if (BPF_CLASS(insn->code) == BPF_LDX &&
+                           BPF_MODE(insn->code) == BPF_PROBE_MEM)
+                               num_exentries++;
+               }
+               func[i]->aux->num_exentries = num_exentries;
                func[i] = bpf_int_jit_compile(func[i]);
                if (!func[i]->jited) {
                        err = -ENOTSUPP;
index 1ea181a..dd24774 100644 (file)
@@ -6439,18 +6439,8 @@ void cgroup_sk_alloc_disable(void)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled)
-               return;
-
-       /* Socket clone path */
-       if (skcd->val) {
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       if (cgroup_sk_alloc_disabled) {
+               skcd->no_refcnt = 1;
                return;
        }
 
@@ -6475,10 +6465,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        rcu_read_unlock();
 }
 
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+       if (skcd->val) {
+               if (skcd->no_refcnt)
+                       return;
+               /*
+                * We might be cloning a socket which is left in an empty
+                * cgroup and the cgroup might have already been rmdir'd.
+                * Don't use cgroup_get_live().
+                */
+               cgroup_get(sock_cgroup_ptr(skcd));
+               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       }
+}
+
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
+       if (skcd->no_refcnt)
+               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
index bc8d25f..9e59347 100644 (file)
@@ -587,6 +587,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
                arch_kgdb_ops.disable_hw_break(regs);
 
 acquirelock:
+       rcu_read_lock();
        /*
         * Interrupts will be restored by the 'trap return' code, except when
         * single stepping.
@@ -646,6 +647,7 @@ return_normal:
                        atomic_dec(&slaves_in_kgdb);
                        dbg_touch_watchdogs();
                        local_irq_restore(flags);
+                       rcu_read_unlock();
                        return 0;
                }
                cpu_relax();
@@ -664,6 +666,7 @@ return_normal:
                raw_spin_unlock(&dbg_master_lock);
                dbg_touch_watchdogs();
                local_irq_restore(flags);
+               rcu_read_unlock();
 
                goto acquirelock;
        }
@@ -787,6 +790,7 @@ kgdb_restore:
        raw_spin_unlock(&dbg_master_lock);
        dbg_touch_watchdogs();
        local_irq_restore(flags);
+       rcu_read_unlock();
 
        return kgdb_info[cpu].ret_state;
 }
index 61774ae..a790026 100644 (file)
@@ -792,6 +792,19 @@ static void gdb_cmd_query(struct kgdb_state *ks)
                }
                break;
 #endif
+#ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT
+       case 'S':
+               if (!strncmp(remcom_in_buffer, "qSupported:", 11))
+                       strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature);
+               break;
+       case 'X':
+               if (!strncmp(remcom_in_buffer, "qXfer:", 6))
+                       kgdb_arch_handle_qxfer_pkt(remcom_in_buffer,
+                                                  remcom_out_buffer);
+               break;
+#endif
+       default:
+               break;
        }
 }
 
index 924bc92..683a799 100644 (file)
@@ -542,6 +542,44 @@ static int kdb_search_string(char *searched, char *searchfor)
        return 0;
 }
 
+static void kdb_msg_write(const char *msg, int msg_len)
+{
+       struct console *c;
+
+       if (msg_len == 0)
+               return;
+
+       if (dbg_io_ops) {
+               const char *cp = msg;
+               int len = msg_len;
+
+               while (len--) {
+                       dbg_io_ops->write_char(*cp);
+                       cp++;
+               }
+       }
+
+       for_each_console(c) {
+               if (!(c->flags & CON_ENABLED))
+                       continue;
+               if (c == dbg_io_ops->cons)
+                       continue;
+               /*
+                * Set oops_in_progress to encourage the console drivers to
+                * disregard their internal spin locks: in the current calling
+                * context the risk of deadlock is a bigger problem than risks
+                * due to re-entering the console driver. We operate directly on
+                * oops_in_progress rather than using bust_spinlocks() because
+                * the calls bust_spinlocks() makes on exit are not appropriate
+                * for this calling context.
+                */
+               ++oops_in_progress;
+               c->write(c, msg, msg_len);
+               --oops_in_progress;
+               touch_nmi_watchdog();
+       }
+}
+
 int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
        int diag;
@@ -553,7 +591,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
        int this_cpu, old_cpu;
        char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
        char *moreprompt = "more> ";
-       struct console *c;
        unsigned long uninitialized_var(flags);
 
        /* Serialize kdb_printf if multiple cpus try to write at once.
@@ -687,22 +724,11 @@ kdb_printit:
         */
        retlen = strlen(kdb_buffer);
        cp = (char *) printk_skip_headers(kdb_buffer);
-       if (!dbg_kdb_mode && kgdb_connected) {
+       if (!dbg_kdb_mode && kgdb_connected)
                gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
-       } else {
-               if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = retlen - (cp - kdb_buffer);
-                       cp2 = cp;
-                       while (len--) {
-                               dbg_io_ops->write_char(*cp2);
-                               cp2++;
-                       }
-               }
-               for_each_console(c) {
-                       c->write(c, cp, retlen - (cp - kdb_buffer));
-                       touch_nmi_watchdog();
-               }
-       }
+       else
+               kdb_msg_write(cp, retlen - (cp - kdb_buffer));
+
        if (logging) {
                saved_loglevel = console_loglevel;
                console_loglevel = CONSOLE_LOGLEVEL_SILENT;
@@ -751,19 +777,7 @@ kdb_printit:
                        moreprompt = "more> ";
 
                kdb_input_flush();
-
-               if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = strlen(moreprompt);
-                       cp = moreprompt;
-                       while (len--) {
-                               dbg_io_ops->write_char(*cp);
-                               cp++;
-                       }
-               }
-               for_each_console(c) {
-                       c->write(c, moreprompt, strlen(moreprompt));
-                       touch_nmi_watchdog();
-               }
+               kdb_msg_write(moreprompt, strlen(moreprompt));
 
                if (logging)
                        printk("%s", moreprompt);
index a0ce3c1..1da3f44 100644 (file)
@@ -71,15 +71,16 @@ config SWIOTLB
 # in the pagetables
 #
 config DMA_NONCOHERENT_MMAP
+       default y if !MMU
        bool
 
 config DMA_COHERENT_POOL
+       select GENERIC_ALLOCATOR
        bool
 
 config DMA_REMAP
        bool
        depends on MMU
-       select GENERIC_ALLOCATOR
        select DMA_NONCOHERENT_MMAP
 
 config DMA_DIRECT_REMAP
index 0a4881e..67f060b 100644 (file)
@@ -70,7 +70,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
        return 0;
 }
 
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
@@ -109,14 +109,15 @@ static inline bool dma_should_free_from_pool(struct device *dev,
        return false;
 }
 
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, unsigned long attrs)
 {
-       size_t alloc_size = PAGE_ALIGN(size);
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;
 
+       WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;
 
@@ -124,14 +125,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        gfp &= ~__GFP_ZERO;
        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
-       page = dma_alloc_contiguous(dev, alloc_size, gfp);
+       page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-               dma_free_contiguous(dev, page, alloc_size);
+               dma_free_contiguous(dev, page, size);
                page = NULL;
        }
 again:
        if (!page)
-               page = alloc_pages_node(node, gfp, get_order(alloc_size));
+               page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
@@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
        struct page *page;
        void *ret;
+       int err;
+
+       size = PAGE_ALIGN(size);
 
        if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-               ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+               ret = dma_alloc_from_pool(dev, size, &page, gfp);
                if (!ret)
                        return NULL;
                goto done;
@@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
             dma_alloc_need_uncached(dev, attrs)) ||
            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                /* remove any dirty cache lines on the kernel alias */
-               arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+               arch_dma_prep_coherent(page, size);
 
                /* create a coherent mapping */
-               ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+               ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
+               if (force_dma_unencrypted(dev)) {
+                       err = set_memory_decrypted((unsigned long)ret,
+                                                  1 << get_order(size));
+                       if (err)
+                               goto out_free_pages;
+               }
                memset(ret, 0, size);
                goto done;
        }
@@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        }
 
        ret = page_address(page);
-       if (force_dma_unencrypted(dev))
-               set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_decrypted((unsigned long)ret,
+                                          1 << get_order(size));
+               if (err)
+                       goto out_free_pages;
+       }
 
        memset(ret, 0, size);
 
@@ -217,7 +231,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
-                       goto out_free_pages;
+                       goto out_encrypt_pages;
        }
 done:
        if (force_dma_unencrypted(dev))
@@ -225,6 +239,15 @@ done:
        else
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        return ret;
+
+out_encrypt_pages:
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_encrypted((unsigned long)page_address(page),
+                                          1 << get_order(size));
+               /* If memory cannot be re-encrypted, it must be leaked */
+               if (err)
+                       return NULL;
+       }
 out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
@@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
        return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
        return dev_is_dma_coherent(dev) ||
@@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-       return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               unsigned long attrs)
-{
-       return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
@@ -530,3 +539,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
 }
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return !dev_is_dma_coherent(dev) ||
+               is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
index 98e3d87..a8c18c9 100644 (file)
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
 
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (dma_is_direct(ops))
+               return dma_direct_need_sync(dev, dma_addr);
+       return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
 unsigned long dma_get_merge_boundary(struct device *dev)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
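
dma_need_sync() gives DMA API users a per-mapping answer to "will dma_sync_*() ever do anything for this address?", falling back to the new dma_direct_need_sync() for direct mappings. A hedged driver-side sketch that caches the answer per buffer and skips the per-packet syncs when they are not needed (all names are illustrative):

#include <linux/dma-mapping.h>

struct example_rx_buf {
        dma_addr_t dma;
        bool need_sync;
};

static void example_after_map(struct device *dev, struct example_rx_buf *buf)
{
        buf->need_sync = dma_need_sync(dev, buf->dma);
}

static void example_on_rx(struct device *dev, struct example_rx_buf *buf,
                          size_t len)
{
        if (buf->need_sync)
                dma_sync_single_for_cpu(dev, buf->dma, len, DMA_FROM_DEVICE);
        /* ... process the received data ... */
}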
index 8cfa012..6bc74a2 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/debugfs.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
 #include <linux/set_memory.h>
@@ -69,12 +68,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
        do {
                pool_size = 1 << (PAGE_SHIFT + order);
-
-               if (dev_get_cma_area(NULL))
-                       page = dma_alloc_from_contiguous(NULL, 1 << order,
-                                                        order, false);
-               else
-                       page = alloc_pages(gfp, order);
+               page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;
@@ -118,8 +112,7 @@ remove_mapping:
        dma_common_free_remap(addr, pool_size);
 #endif
 free_page: __maybe_unused
-       if (!dma_release_from_contiguous(NULL, page, 1 << order))
-               __free_pages(page, order);
+       __free_pages(page, order);
 out:
        return ret;
 }
@@ -203,7 +196,7 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dev_to_pool(struct device *dev)
+static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
 {
        u64 phys_mask;
        gfp_t gfp;
@@ -217,47 +210,79 @@ static inline struct gen_pool *dev_to_pool(struct device *dev)
        return atomic_pool_kernel;
 }
 
-static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
+static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+{
+       if (bad_pool == atomic_pool_kernel)
+               return atomic_pool_dma32 ? : atomic_pool_dma;
+
+       if (bad_pool == atomic_pool_dma32)
+               return atomic_pool_dma;
+
+       return NULL;
+}
+
+static inline struct gen_pool *dma_guess_pool(struct device *dev,
+                                             struct gen_pool *bad_pool)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
+       if (bad_pool)
+               return dma_get_safer_pool(bad_pool);
 
-       if (unlikely(!pool))
-               return false;
-       return gen_pool_has_addr(pool, (unsigned long)start, size);
+       return dma_guess_pool_from_device(dev);
 }
 
 void *dma_alloc_from_pool(struct device *dev, size_t size,
                          struct page **ret_page, gfp_t flags)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
-       unsigned long val;
+       struct gen_pool *pool = NULL;
+       unsigned long val = 0;
        void *ptr = NULL;
-
-       if (!pool) {
-               WARN(1, "%pGg atomic pool not initialised!\n", &flags);
-               return NULL;
+       phys_addr_t phys;
+
+       while (1) {
+               pool = dma_guess_pool(dev, pool);
+               if (!pool) {
+                       WARN(1, "Failed to get suitable pool for %s\n",
+                            dev_name(dev));
+                       break;
+               }
+
+               val = gen_pool_alloc(pool, size);
+               if (!val)
+                       continue;
+
+               phys = gen_pool_virt_to_phys(pool, val);
+               if (dma_coherent_ok(dev, phys, size))
+                       break;
+
+               gen_pool_free(pool, val, size);
+               val = 0;
        }
 
-       val = gen_pool_alloc(pool, size);
-       if (val) {
-               phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
+       if (val) {
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);
+
+               if (gen_pool_avail(pool) < atomic_pool_size)
+                       schedule_work(&atomic_pool_work);
        }
-       if (gen_pool_avail(pool) < atomic_pool_size)
-               schedule_work(&atomic_pool_work);
 
        return ptr;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
+       struct gen_pool *pool = NULL;
+
+       while (1) {
+               pool = dma_guess_pool(dev, pool);
+               if (!pool)
+                       return false;
 
-       if (!dma_in_atomic_pool(dev, start, size))
-               return false;
-       gen_pool_free(pool, (unsigned long)start, size);
-       return true;
+               if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
+                       gen_pool_free(pool, (unsigned long)start, size);
+                       return true;
+               }
+       }
 }
index e739a6e..78b23f0 100644 (file)
@@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 {
        void *vaddr;
 
-       vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+       vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
+                    VM_DMA_COHERENT, prot);
        if (vaddr)
                find_vm_area(vaddr)->pages = pages;
        return vaddr;
@@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void *dma_common_contiguous_remap(struct page *page, size_t size,
                        pgprot_t prot, const void *caller)
 {
-       int count = size >> PAGE_SHIFT;
+       int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;
        int i;
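Worked example (not part of the patch) of why the PAGE_ALIGN() matters in both remap helpers: for a size that is not a page multiple, a plain shift under-counts the pages to map. Assumes 4 KiB pages.

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = 5000;      /* not a multiple of 4 KiB */

            printf("size >> PAGE_SHIFT             = %lu page(s)\n",
                   size >> PAGE_SHIFT);                     /* 1: too few */
            printf("PAGE_ALIGN(size) >> PAGE_SHIFT = %lu page(s)\n",
                   PAGE_ALIGN(size) >> PAGE_SHIFT);         /* 2: correct */
            return 0;
    }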
index 142b236..efc5493 100644 (file)
@@ -1977,7 +1977,7 @@ static __latent_entropy struct task_struct *copy_process(
         * to stop root fork bombs.
         */
        retval = -EAGAIN;
-       if (nr_threads >= max_threads)
+       if (data_race(nr_threads >= max_threads))
                goto bad_fork_cleanup_count;
 
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
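For illustration only: data_race() marks the lockless read of nr_threads as an intentional, benign race so KCSAN stops reporting it. A minimal userspace sketch of the pattern, with a simplified stand-in for the kernel macro from <linux/compiler.h>:

    #include <stdio.h>

    /* Simplified stand-in: the kernel version also tells KCSAN to ignore
     * the access; functionally it just evaluates the expression. */
    #define data_race(expr)        (expr)

    static int nr_threads;                 /* written concurrently elsewhere */
    static const int max_threads = 4096;

    int main(void)
    {
            /* A slightly stale value is acceptable for this heuristic limit,
             * so the racy read is annotated rather than locked. */
            if (data_race(nr_threads >= max_threads))
                    printf("fork would fail with -EAGAIN\n");
            else
                    printf("fork may proceed\n");
            return 0;
    }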
index 7619111..2a9fec5 100644 (file)
@@ -195,9 +195,9 @@ void irq_set_thread_affinity(struct irq_desc *desc)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 }
 
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 static void irq_validate_effective_affinity(struct irq_data *data)
 {
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
 
@@ -205,9 +205,19 @@ static void irq_validate_effective_affinity(struct irq_data *data)
                return;
        pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
                     chip->name, data->irq);
-#endif
 }
 
+static inline void irq_init_effective_affinity(struct irq_data *data,
+                                              const struct cpumask *mask)
+{
+       cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
+}
+#else
+static inline void irq_validate_effective_affinity(struct irq_data *data) { }
+static inline void irq_init_effective_affinity(struct irq_data *data,
+                                              const struct cpumask *mask) { }
+#endif
+
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
                        bool force)
 {
@@ -304,6 +314,26 @@ static int irq_try_set_affinity(struct irq_data *data,
        return ret;
 }
 
+static bool irq_set_affinity_deactivated(struct irq_data *data,
+                                        const struct cpumask *mask, bool force)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+
+       /*
+        * If the interrupt is not yet activated, just store the affinity
+        * mask and do not call the chip driver at all. On activation the
+        * driver has to make sure anyway that the interrupt is in a
+        * useable state so startup works.
+        */
+       if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
+               return false;
+
+       cpumask_copy(desc->irq_common_data.affinity, mask);
+       irq_init_effective_affinity(data, mask);
+       irqd_set(data, IRQD_AFFINITY_SET);
+       return true;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
                            bool force)
 {
@@ -314,6 +344,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;
 
+       if (irq_set_affinity_deactivated(data, mask, force))
+               return 0;
+
        if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
                ret = irq_try_set_affinity(data, mask, force);
        } else {
index 16c8c60..bb14e64 100644 (file)
@@ -644,19 +644,20 @@ static inline int kallsyms_for_perf(void)
  * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
  * block even that).
  */
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
 {
        switch (kptr_restrict) {
        case 0:
                if (kallsyms_for_perf())
-                       return 1;
+                       return true;
        /* fallthrough */
        case 1:
-               if (has_capability_noaudit(current, CAP_SYSLOG))
-                       return 1;
+               if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+                                    CAP_OPT_NOAUDIT) == 0)
+                       return true;
        /* fallthrough */
        default:
-               return 0;
+               return false;
        }
 }
 
@@ -673,7 +674,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        reset_iter(iter, 0);
 
-       iter->show_value = kallsyms_show_value();
+       /*
+        * Instead of checking this on every s_show() call, cache
+        * the result here at open time.
+        */
+       iter->show_value = kallsyms_show_value(file->f_cred);
        return 0;
 }
 
index bb05fd5..09cc78d 100644 (file)
@@ -181,34 +181,19 @@ void kimage_file_post_load_cleanup(struct kimage *image)
 static int
 kimage_validate_signature(struct kimage *image)
 {
-       const char *reason;
        int ret;
 
        ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                           image->kernel_buf_len);
-       switch (ret) {
-       case 0:
-               break;
+       if (ret) {
 
-               /* Certain verification errors are non-fatal if we're not
-                * checking errors, provided we aren't mandating that there
-                * must be a valid signature.
-                */
-       case -ENODATA:
-               reason = "kexec of unsigned image";
-               goto decide;
-       case -ENOPKG:
-               reason = "kexec of image with unsupported crypto";
-               goto decide;
-       case -ENOKEY:
-               reason = "kexec of image with unavailable key";
-       decide:
                if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
-                       pr_notice("%s rejected\n", reason);
+                       pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
                        return ret;
                }
 
-               /* If IMA is guaranteed to appraise a signature on the kexec
+               /*
+                * If IMA is guaranteed to appraise a signature on the kexec
                 * image, permit it even if the kernel is otherwise locked
                 * down.
                 */
@@ -216,17 +201,10 @@ kimage_validate_signature(struct kimage *image)
                    security_locked_down(LOCKDOWN_KEXEC))
                        return -EPERM;
 
-               return 0;
-
-               /* All other errors are fatal, including nomem, unparseable
-                * signatures and signature check failures - even if signatures
-                * aren't required.
-                */
-       default:
-               pr_notice("kernel signature verification failed (%d).\n", ret);
+               pr_debug("kernel signature verification failed (%d).\n", ret);
        }
 
-       return ret;
+       return 0;
 }
 #endif
 
index 4a904cc..2e97feb 100644 (file)
@@ -2448,7 +2448,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
        else
                kprobe_type = "k";
 
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(pi->file->f_cred))
                addr = NULL;
 
        if (sym)
@@ -2540,7 +2540,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
         * If /proc/kallsyms is not showing kernel address, we won't
         * show them here either.
         */
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(m->file->f_cred))
                seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
                           (void *)ent->start_addr);
        else
index e8a1985..aa183c9 100644 (file)
@@ -1510,8 +1510,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
 }
 
 struct module_sect_attr {
-       struct module_attribute mattr;
-       char *name;
+       struct bin_attribute battr;
        unsigned long address;
 };
 
@@ -1521,13 +1520,18 @@ struct module_sect_attrs {
        struct module_sect_attr attrs[];
 };
 
-static ssize_t module_sect_show(struct module_attribute *mattr,
-                               struct module_kobject *mk, char *buf)
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+                               struct bin_attribute *battr,
+                               char *buf, loff_t pos, size_t count)
 {
        struct module_sect_attr *sattr =
-               container_of(mattr, struct module_sect_attr, mattr);
-       return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
-                      (void *)sattr->address : NULL);
+               container_of(battr, struct module_sect_attr, battr);
+
+       if (pos != 0)
+               return -EINVAL;
+
+       return sprintf(buf, "0x%px\n",
+                      kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
 }
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -1535,7 +1539,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
        unsigned int section;
 
        for (section = 0; section < sect_attrs->nsections; section++)
-               kfree(sect_attrs->attrs[section].name);
+               kfree(sect_attrs->attrs[section].battr.attr.name);
        kfree(sect_attrs);
 }
 
@@ -1544,42 +1548,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
        unsigned int nloaded = 0, i, size[2];
        struct module_sect_attrs *sect_attrs;
        struct module_sect_attr *sattr;
-       struct attribute **gattr;
+       struct bin_attribute **gattr;
 
        /* Count loaded sections and allocate structures */
        for (i = 0; i < info->hdr->e_shnum; i++)
                if (!sect_empty(&info->sechdrs[i]))
                        nloaded++;
        size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
-                       sizeof(sect_attrs->grp.attrs[0]));
-       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+                       sizeof(sect_attrs->grp.bin_attrs[0]));
+       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
        sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
        if (sect_attrs == NULL)
                return;
 
        /* Setup section attributes. */
        sect_attrs->grp.name = "sections";
-       sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+       sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
 
        sect_attrs->nsections = 0;
        sattr = &sect_attrs->attrs[0];
-       gattr = &sect_attrs->grp.attrs[0];
+       gattr = &sect_attrs->grp.bin_attrs[0];
        for (i = 0; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *sec = &info->sechdrs[i];
                if (sect_empty(sec))
                        continue;
+               sysfs_bin_attr_init(&sattr->battr);
                sattr->address = sec->sh_addr;
-               sattr->name = kstrdup(info->secstrings + sec->sh_name,
-                                       GFP_KERNEL);
-               if (sattr->name == NULL)
+               sattr->battr.attr.name =
+                       kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+               if (sattr->battr.attr.name == NULL)
                        goto out;
                sect_attrs->nsections++;
-               sysfs_attr_init(&sattr->mattr.attr);
-               sattr->mattr.show = module_sect_show;
-               sattr->mattr.store = NULL;
-               sattr->mattr.attr.name = sattr->name;
-               sattr->mattr.attr.mode = S_IRUSR;
-               *(gattr++) = &(sattr++)->mattr.attr;
+               sattr->battr.read = module_sect_read;
+               sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
+               sattr->battr.attr.mode = 0400;
+               *(gattr++) = &(sattr++)->battr;
        }
        *gattr = NULL;
 
@@ -1669,7 +1672,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
                        continue;
                if (info->sechdrs[i].sh_type == SHT_NOTE) {
                        sysfs_bin_attr_init(nattr);
-                       nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+                       nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
                        nattr->attr.mode = S_IRUGO;
                        nattr->size = info->sechdrs[i].sh_size;
                        nattr->private = (void *) info->sechdrs[i].sh_addr;
@@ -2783,7 +2786,9 @@ static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
 
 void * __weak module_alloc(unsigned long size)
 {
-       return vmalloc_exec(size);
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 bool __weak module_init_section(const char *name)
@@ -4377,7 +4382,7 @@ static int modules_open(struct inode *inode, struct file *file)
 
        if (!err) {
                struct seq_file *m = file->private_data;
-               m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+               m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
        }
 
        return err;
index b03df67..cd35663 100644 (file)
@@ -531,7 +531,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
        } else if (!IS_ERR(pidfd_pid(file))) {
                err = check_setns_flags(flags);
        } else {
-               err = -EBADF;
+               err = -EINVAL;
        }
        if (err)
                goto out;
index 29fc5d8..4373f7a 100644 (file)
@@ -335,7 +335,7 @@ static void padata_reorder(struct parallel_data *pd)
         *
         * Ensure reorder queue is read after pd->lock is dropped so we see
         * new objects from another task in padata_do_serial.  Pairs with
-        * smp_mb__after_atomic in padata_do_serial.
+        * smp_mb in padata_do_serial.
         */
        smp_mb();
 
@@ -418,7 +418,7 @@ void padata_do_serial(struct padata_priv *padata)
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
-       smp_mb__after_atomic();
+       smp_mb();
 
        padata_reorder(pd);
 }
index 8c14835..b71eaf5 100644 (file)
@@ -974,16 +974,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
                user->idx = log_next_idx;
                user->seq = log_next_seq;
                break;
-       case SEEK_CUR:
-               /*
-                * It isn't supported due to the record nature of this
-                * interface: _SET _DATA and _END point to very specific
-                * record positions, while _CUR would be more useful in case
-                * of a byte-based log. Because of that, return the default
-                * errno value for invalid seek operation.
-                */
-               ret = -ESPIPE;
-               break;
        default:
                ret = -EINVAL;
        }
index 16dd1e6..9eb39c2 100644 (file)
@@ -723,7 +723,7 @@ kfree_perf_init(void)
                schedule_timeout_uninterruptible(1);
        }
 
-       pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj));
+       pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));
 
        kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
                               GFP_KERNEL);
index c716ead..6c6569e 100644 (file)
@@ -250,7 +250,7 @@ static noinstr void rcu_dynticks_eqs_enter(void)
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is no longer watching.  Better be in extended quiescent state!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_CTR));
@@ -274,13 +274,13 @@ static noinstr void rcu_dynticks_eqs_exit(void)
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     !(seq & RCU_DYNTICK_CTRL_CTR));
        if (seq & RCU_DYNTICK_CTRL_MASK) {
-               atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
+               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                smp_mb__after_atomic(); /* _exit after clearing mask. */
        }
 }
@@ -313,7 +313,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -633,6 +633,10 @@ static noinstr void rcu_eqs_enter(bool user)
        do_nocb_deferred_wakeup(rdp);
        rcu_prepare_for_idle();
        rcu_preempt_deferred_qs(current);
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
        instrumentation_end();
        WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
        // RCU is watching here ...
@@ -692,6 +696,7 @@ noinstr void rcu_nmi_exit(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
+       instrumentation_begin();
        /*
         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
         * (We are exiting an NMI handler, so RCU better be paying attention
@@ -705,7 +710,6 @@ noinstr void rcu_nmi_exit(void)
         * leave it in non-RCU-idle state.
         */
        if (rdp->dynticks_nmi_nesting != 1) {
-               instrumentation_begin();
                trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
                                  atomic_read(&rdp->dynticks));
                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
@@ -714,13 +718,15 @@ noinstr void rcu_nmi_exit(void)
                return;
        }
 
-       instrumentation_begin();
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
        if (!in_nmi())
                rcu_prepare_for_idle();
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
        instrumentation_end();
 
        // RCU is watching here ...
@@ -838,6 +844,10 @@ static void noinstr rcu_eqs_exit(bool user)
        rcu_dynticks_eqs_exit();
        // ... but is watching here.
        instrumentation_begin();
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
@@ -983,13 +993,21 @@ noinstr void rcu_nmi_enter(void)
                if (!in_nmi())
                        rcu_cleanup_after_idle();
 
+               instrumentation_begin();
+               // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
+               instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
+               // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+               instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
                incby = 1;
        } else if (!in_nmi()) {
                instrumentation_begin();
                rcu_irq_enter_check_tick();
                instrumentation_end();
+       } else  {
+               instrumentation_begin();
        }
-       instrumentation_begin();
+
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdp->dynticks_nmi_nesting,
                          rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
index 8f36032..e15543c 100644 (file)
@@ -1311,9 +1311,6 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       if (task_contributes_to_load(p))
-               rq->nr_uninterruptible--;
-
        enqueue_task(rq, p, flags);
 
        p->on_rq = TASK_ON_RQ_QUEUED;
@@ -1323,9 +1320,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
        p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
 
-       if (task_contributes_to_load(p))
-               rq->nr_uninterruptible++;
-
        dequeue_task(rq, p, flags);
 }
 
@@ -1637,7 +1631,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                goto out;
        }
 
-       if (cpumask_equal(p->cpus_ptr, new_mask))
+       if (cpumask_equal(&p->cpus_mask, new_mask))
                goto out;
 
        /*
@@ -2236,10 +2230,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 
        lockdep_assert_held(&rq->lock);
 
-#ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
 
+#ifdef CONFIG_SMP
        if (wake_flags & WF_MIGRATED)
                en_flags |= ENQUEUE_MIGRATED;
 #endif
@@ -2293,8 +2287,15 @@ void sched_ttwu_pending(void *arg)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
-       llist_for_each_entry_safe(p, t, llist, wake_entry)
+       llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
+               if (WARN_ON_ONCE(p->on_cpu))
+                       smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+               if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
+                       set_task_cpu(p, cpu_of(rq));
+
                ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
+       }
 
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -2322,7 +2323,7 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags
        p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
 
        WRITE_ONCE(rq->ttwu_pending, 1);
-       __smp_call_single_queue(cpu, &p->wake_entry);
+       __smp_call_single_queue(cpu, &p->wake_entry.llist);
 }
 
 void wake_up_if_idle(int cpu)
@@ -2369,7 +2370,7 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
         * the soon-to-be-idle CPU as the current CPU is likely busy.
         * nr_running is checked to avoid unnecessary task stacking.
         */
-       if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1)
+       if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
                return true;
 
        return false;
@@ -2378,6 +2379,9 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
        if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+               if (WARN_ON_ONCE(cpu == smp_processor_id()))
+                       return false;
+
                sched_clock_cpu(cpu); /* Sync clocks across CPUs */
                __ttwu_queue_wakelist(p, cpu, wake_flags);
                return true;
@@ -2528,7 +2532,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                        goto out;
 
                success = 1;
-               cpu = task_cpu(p);
                trace_sched_waking(p);
                p->state = TASK_RUNNING;
                trace_sched_wakeup(p);
@@ -2550,7 +2553,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        /* We're going to change ->state: */
        success = 1;
-       cpu = task_cpu(p);
 
        /*
         * Ensure we load p->on_rq _after_ p->state, otherwise it would
@@ -2575,7 +2577,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
         */
        smp_rmb();
-       if (p->on_rq && ttwu_remote(p, wake_flags))
+       if (READ_ONCE(p->on_rq) && ttwu_remote(p, wake_flags))
                goto unlock;
 
        if (p->in_iowait) {
@@ -2584,9 +2586,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        }
 
 #ifdef CONFIG_SMP
-       p->sched_contributes_to_load = !!task_contributes_to_load(p);
-       p->state = TASK_WAKING;
-
        /*
         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
         * possible to, falsely, observe p->on_cpu == 0.
@@ -2605,8 +2604,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         *
         * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
         * __schedule().  See the comment for smp_mb__after_spinlock().
+        *
+        * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
+        * schedule()'s deactivate_task() has 'happened' and p will no longer
+        * care about its own p->state. See the comment in __schedule().
         */
-       smp_rmb();
+       smp_acquire__after_ctrl_dep();
+
+       /*
+        * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
+        * == 0), which means we need to do an enqueue, change p->state to
+        * TASK_WAKING such that we can unlock p->pi_lock before doing the
+        * enqueue, such as ttwu_queue_wakelist().
+        */
+       p->state = TASK_WAKING;
 
        /*
         * If the owning (remote) CPU is still in the middle of schedule() with
@@ -2614,8 +2625,21 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * which potentially sends an IPI instead of spinning on p->on_cpu to
         * let the waker make forward progress. This is safe because IRQs are
         * disabled and the IPI will deliver after on_cpu is cleared.
+        *
+        * Ensure we load task_cpu(p) after p->on_cpu:
+        *
+        * set_task_cpu(p, cpu);
+        *   STORE p->cpu = @cpu
+        * __schedule() (switch to task 'p')
+        *   LOCK rq->lock
+        *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
+        *   STORE p->on_cpu = 1                LOAD p->cpu
+        *
+        * to ensure we observe the correct CPU on which the task is currently
+        * scheduling.
         */
-       if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ))
+       if (smp_load_acquire(&p->on_cpu) &&
+           ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
                goto unlock;
 
        /*
@@ -2635,6 +2659,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                psi_ttwu_dequeue(p);
                set_task_cpu(p, cpu);
        }
+#else
+       cpu = task_cpu(p);
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu, wake_flags);
@@ -2642,7 +2668,7 @@ unlock:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 out:
        if (success)
-               ttwu_stat(p, cpu, wake_flags);
+               ttwu_stat(p, task_cpu(p), wake_flags);
        preempt_enable();
 
        return success;
@@ -2763,7 +2789,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
        init_numa_balancing(clone_flags, p);
 #ifdef CONFIG_SMP
-       p->wake_entry_type = CSD_TYPE_TTWU;
+       p->wake_entry.u_flags = CSD_TYPE_TTWU;
 #endif
 }
 
@@ -2939,6 +2965,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
         * Silence PROVE_RCU.
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
+       rseq_migrate(p);
        /*
         * We're setting the CPU for the first time, we don't migrate,
         * so use __set_task_cpu().
@@ -3003,6 +3030,7 @@ void wake_up_new_task(struct task_struct *p)
         * as we're not fully set-up yet.
         */
        p->recent_used_cpu = task_cpu(p);
+       rseq_migrate(p);
        __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
        rq = __task_rq_lock(p, &rf);
@@ -4074,6 +4102,7 @@ static void __sched notrace __schedule(bool preempt)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
+       unsigned long prev_state;
        struct rq_flags rf;
        struct rq *rq;
        int cpu;
@@ -4090,12 +4119,22 @@ static void __sched notrace __schedule(bool preempt)
        local_irq_disable();
        rcu_note_context_switch(preempt);
 
+       /* See deactivate_task() below. */
+       prev_state = prev->state;
+
        /*
         * Make sure that signal_pending_state()->signal_pending() below
         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-        * done by the caller to avoid the race with signal_wake_up().
+        * done by the caller to avoid the race with signal_wake_up():
+        *
+        * __set_current_state(@state)          signal_wake_up()
+        * schedule()                             set_tsk_thread_flag(p, TIF_SIGPENDING)
+        *                                        wake_up_state(p, state)
+        *   LOCK rq->lock                          LOCK p->pi_state
+        *   smp_mb__after_spinlock()               smp_mb__after_spinlock()
+        *     if (signal_pending_state())          if (p->state & @state)
         *
-        * The membarrier system call requires a full memory barrier
+        * Also, the membarrier system call requires a full memory barrier
         * after coming from user-space, before storing to rq->curr.
         */
        rq_lock(rq, &rf);
@@ -4106,10 +4145,31 @@ static void __sched notrace __schedule(bool preempt)
        update_rq_clock(rq);
 
        switch_count = &prev->nivcsw;
-       if (!preempt && prev->state) {
-               if (signal_pending_state(prev->state, prev)) {
+       /*
+        * We must re-load prev->state in case ttwu_remote() changed it
+        * before we acquired rq->lock.
+        */
+       if (!preempt && prev_state && prev_state == prev->state) {
+               if (signal_pending_state(prev_state, prev)) {
                        prev->state = TASK_RUNNING;
                } else {
+                       prev->sched_contributes_to_load =
+                               (prev_state & TASK_UNINTERRUPTIBLE) &&
+                               !(prev_state & TASK_NOLOAD) &&
+                               !(prev->flags & PF_FROZEN);
+
+                       if (prev->sched_contributes_to_load)
+                               rq->nr_uninterruptible++;
+
+                       /*
+                        * __schedule()                 ttwu()
+                        *   prev_state = prev->state;    if (READ_ONCE(p->on_rq) && ...)
+                        *   LOCK rq->lock                  goto out;
+                        *   smp_mb__after_spinlock();    smp_acquire__after_ctrl_dep();
+                        *   p->on_rq = 0;                p->state = TASK_WAKING;
+                        *
+                        * After this, schedule() must not care about p->state any more.
+                        */
                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
 
                        if (prev->in_iowait) {
@@ -4533,7 +4593,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
         */
        if (dl_prio(prio)) {
                if (!dl_prio(p->normal_prio) ||
-                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+                   (pi_task && dl_prio(pi_task->prio) &&
+                    dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
                        queue_flag |= ENQUEUE_REPLENISH;
                } else
index 504d2f5..f63f337 100644 (file)
@@ -2692,6 +2692,7 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_bw                    = 0;
        dl_se->dl_density               = 0;
 
+       dl_se->dl_boosted               = 0;
        dl_se->dl_throttled             = 0;
        dl_se->dl_yielded               = 0;
        dl_se->dl_non_contending        = 0;
index cbcb2f7..04fa8db 100644 (file)
@@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct task_struct *p)
                }
        }
 
-       sa->runnable_avg = cpu_scale;
+       sa->runnable_avg = sa->util_avg;
 
        if (p->sched_class != &fair_sched_class) {
                /*
@@ -4039,7 +4039,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
                return;
        }
 
-       rq->misfit_task_load = task_h_load(p);
+       /*
+        * Make sure that misfit_task_load will not be null even if
+        * task_h_load() returns 0.
+        */
+       rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
 }
 
 #else /* CONFIG_SMP */
@@ -7638,7 +7642,14 @@ static int detach_tasks(struct lb_env *env)
 
                switch (env->migration_type) {
                case migrate_load:
-                       load = task_h_load(p);
+                       /*
+                        * Depending on the number of CPUs and tasks and the
+                        * cgroup hierarchy, task_h_load() can return a null
+                        * value. Make sure that env->imbalance decreases
+                        * otherwise detach_tasks() will stop only after
+                        * detaching up to loop_max tasks.
+                        */
+                       load = max_t(unsigned long, task_h_load(p), 1);
 
                        if (sched_feat(LB_MIN) &&
                            load < 16 && !env->sd->nr_balance_failed)
index 05deb81..1ae95b9 100644 (file)
@@ -96,6 +96,15 @@ void __cpuidle default_idle_call(void)
        }
 }
 
+static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
+                              struct cpuidle_device *dev)
+{
+       if (current_clr_polling_and_test())
+               return -EBUSY;
+
+       return cpuidle_enter_s2idle(drv, dev);
+}
+
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                      int next_state)
 {
@@ -171,11 +180,9 @@ static void cpuidle_idle_call(void)
                if (idle_should_enter_s2idle()) {
                        rcu_idle_enter();
 
-                       entered_state = cpuidle_enter_s2idle(drv, dev);
-                       if (entered_state > 0) {
-                               local_irq_enable();
+                       entered_state = call_cpuidle_s2idle(drv, dev);
+                       if (entered_state > 0)
                                goto exit_idle;
-                       }
 
                        rcu_idle_exit();
 
index 1d4e94c..877fb08 100644 (file)
@@ -1682,7 +1682,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_SYNC                        0x01            /* Waker goes to sleep after wakeup */
 #define WF_FORK                        0x02            /* Child wakeup after fork */
 #define WF_MIGRATED            0x04            /* Internal use, task got migrated */
-#define WF_ON_RQ               0x08            /* Wakee is on_rq */
+#define WF_ON_CPU              0x08            /* Wakee is on_cpu */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
index 5ca48cc..ee22ec7 100644 (file)
@@ -2529,9 +2529,6 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
-       if (unlikely(current->task_works))
-               task_work_run();
-
        if (unlikely(uprobe_deny_signal()))
                return false;
 
@@ -2544,6 +2541,13 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
        spin_lock_irq(&sighand->siglock);
+       current->jobctl &= ~JOBCTL_TASK_WORK;
+       if (unlikely(current->task_works)) {
+               spin_unlock_irq(&sighand->siglock);
+               task_work_run();
+               goto relock;
+       }
+
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
index 472c2b2..aa17eed 100644 (file)
@@ -669,24 +669,6 @@ void __init smp_init(void)
 {
        int num_nodes, num_cpus;
 
-       /*
-        * Ensure struct irq_work layout matches so that
-        * flush_smp_call_function_queue() can do horrible things.
-        */
-       BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
-                    offsetof(struct __call_single_data, llist));
-       BUILD_BUG_ON(offsetof(struct irq_work, func) !=
-                    offsetof(struct __call_single_data, func));
-       BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
-                    offsetof(struct __call_single_data, flags));
-
-       /*
-        * Assert the CSD_TYPE_TTWU layout is similar enough
-        * for task_struct to be on the @call_single_queue.
-        */
-       BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) !=
-                    offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist));
-
        idle_threads_init();
        cpuhp_threads_init();
 
index 825f282..5c0848c 100644 (file)
@@ -25,9 +25,10 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * 0 if succeeds or -ESRCH.
  */
 int
-task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 {
        struct callback_head *head;
+       unsigned long flags;
 
        do {
                head = READ_ONCE(task->task_works);
@@ -36,8 +37,19 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
                work->next = head;
        } while (cmpxchg(&task->task_works, head, work) != head);
 
-       if (notify)
+       switch (notify) {
+       case TWA_RESUME:
                set_notify_resume(task);
+               break;
+       case TWA_SIGNAL:
+               if (lock_task_sighand(task, &flags)) {
+                       task->jobctl |= JOBCTL_TASK_WORK;
+                       signal_wake_up(task, 0);
+                       unlock_task_sighand(task, &flags);
+               }
+               break;
+       }
+
        return 0;
 }
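For illustration only: a hypothetical caller of the new notify modes. With TWA_SIGNAL the target task is signal-woken (via JOBCTL_TASK_WORK) so the work runs promptly even if the task is blocked in an interruptible sleep; TWA_RESUME keeps the old set_notify_resume() behaviour. The callback and helper names below are invented.

    #include <linux/sched.h>
    #include <linux/task_work.h>

    static void my_callback(struct callback_head *head)
    {
            /* runs in the target task's context, e.g. from get_signal()
             * or on return to user space, depending on the notify mode */
    }

    static struct callback_head my_work;

    static int queue_urgent(struct task_struct *task)
    {
            init_task_work(&my_work, my_callback);
            return task_work_add(task, &my_work, TWA_SIGNAL);
    }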
 
index 398e6ea..df1ff80 100644 (file)
@@ -521,8 +521,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk)
                 * Force expire obscene large timeouts to expire at the
                 * capacity limit of the wheel.
                 */
-               if (expires >= WHEEL_TIMEOUT_CUTOFF)
-                       expires = WHEEL_TIMEOUT_MAX;
+               if (delta >= WHEEL_TIMEOUT_CUTOFF)
+                       expires = clk + WHEEL_TIMEOUT_MAX;
 
                idx = calc_index(expires, LVL_DEPTH - 1);
        }
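Worked example (made-up small constants, not the kernel's) of why the clamp must test the delta and add the cap to clk: clamping the absolute expiry can produce a value that lies before the current wheel clock, while clk + WHEEL_TIMEOUT_MAX keeps the timer at the wheel's capacity limit in the future.

    #include <stdio.h>

    #define WHEEL_TIMEOUT_CUTOFF   1000UL           /* made-up, tiny value */
    #define WHEEL_TIMEOUT_MAX      (WHEEL_TIMEOUT_CUTOFF - 1)

    int main(void)
    {
            unsigned long clk = 5000;               /* current wheel clock */
            unsigned long expires = 9000;           /* "obscene" timeout   */
            unsigned long delta = expires - clk;    /* 4000 >= CUTOFF      */

            unsigned long old_clamp = WHEEL_TIMEOUT_MAX;        /* 999, before clk! */
            unsigned long new_clamp = clk + WHEEL_TIMEOUT_MAX;  /* 5999, ahead      */

            printf("delta=%lu old_clamp=%lu new_clamp=%lu (clk=%lu)\n",
                   delta, old_clamp, new_clamp, clk);
            return 0;
    }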
@@ -584,7 +584,15 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
         * Set the next expiry time and kick the CPU so it can reevaluate the
         * wheel:
         */
-       base->next_expiry = timer->expires;
+       if (time_before(timer->expires, base->clk)) {
+               /*
+                * Prevent forward_timer_base() from moving the base->clk
+                * backward.
+                */
+               base->next_expiry = base->clk;
+       } else {
+               base->next_expiry = timer->expires;
+       }
        wake_up_nohz_cpu(base->cpu);
 }
 
@@ -896,10 +904,13 @@ static inline void forward_timer_base(struct timer_base *base)
         * If the next expiry value is > jiffies, then we fast forward to
         * jiffies otherwise we forward to the next expiry value.
         */
-       if (time_after(base->next_expiry, jnow))
+       if (time_after(base->next_expiry, jnow)) {
                base->clk = jnow;
-       else
+       } else {
+               if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
+                       return;
                base->clk = base->next_expiry;
+       }
 #endif
 }
 
index dc05626..7bc3d61 100644 (file)
@@ -241,7 +241,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
        if (unlikely(ret < 0))
                goto fail;
 
-       return 0;
+       return ret;
 fail:
        memset(dst, 0, size);
        return ret;
index b8e1ca4..00867ff 100644 (file)
@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
        if (unlikely(info->add_timestamp)) {
                bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
 
-               event = rb_add_time_stamp(event, info->delta, abs);
+               event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
                length -= RB_LEN_TIME_EXTEND;
                delta = 0;
        }
index 9de29bb..fa0fc08 100644 (file)
@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
                kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
                ret = kprobe_event_gen_cmd_start(&cmd, event, val);
-               if (ret)
+               if (ret) {
+                       pr_err("Failed to generate probe: %s\n", buf);
                        break;
+               }
 
                ret = kprobe_event_gen_cmd_end(&cmd);
-               if (ret)
+               if (ret) {
                        pr_err("Failed to add probe: %s\n", buf);
+                       break;
+               }
        }
 
        return ret;
@@ -120,7 +124,7 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
 }
 #endif
 
-#ifdef CONFIG_HIST_TRIGGERS
+#ifdef CONFIG_SYNTH_EVENTS
 static int __init
 trace_boot_add_synth_event(struct xbc_node *node, const char *event)
 {
index 3a74736..f725802 100644 (file)
@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 
 int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
-       char *command, *next = buff;
+       char *command, *next;
        struct event_command *p;
        int ret = -EINVAL;
 
+       next = buff = skip_spaces(buff);
        command = strsep(&next, ": \t");
+       if (next) {
+               next = skip_spaces(next);
+               if (!*next)
+                       next = NULL;
+       }
        command = (command[0] != '!') ? command : command + 1;
 
        mutex_lock(&trigger_cmd_mutex);
@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
        int ret;
 
        /* separate the trigger from the filter (t:n [if filter]) */
-       if (param && isdigit(param[0]))
+       if (param && isdigit(param[0])) {
                trigger = strsep(&param, " \t");
+               if (param) {
+                       param = skip_spaces(param);
+                       if (!*param)
+                               param = NULL;
+               }
+       }
 
        trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
        trigger = strsep(&param, " \t");
        if (!trigger)
                return -EINVAL;
+       if (param) {
+               param = skip_spaces(param);
+               if (!*param)
+                       param = NULL;
+       }
 
        system = strsep(&trigger, ":");
        if (!trigger)
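For illustration only: a small userspace sketch of the strsep()+skip_spaces() parsing fixed in this file. Without trimming, a command followed by trailing spaces leaves the remainder pointing at a whitespace-only string, which the trigger code then mistakes for a real parameter or filter. skip_spaces() is re-implemented here as a stand-in for the kernel helper in <linux/string.h>.

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    static char *skip_spaces(const char *s)
    {
            while (isspace((unsigned char)*s))
                    s++;
            return (char *)s;
    }

    int main(void)
    {
            char buf[] = "  traceon   ";            /* leading/trailing spaces */
            char *next = skip_spaces(buf);          /* trim the front first    */
            char *command = strsep(&next, ": \t");

            if (next) {
                    next = skip_spaces(next);
                    if (!*next)                     /* only spaces were left   */
                            next = NULL;
            }
            printf("command='%s' param=%s\n", command, next ? next : "NULL");
            return 0;
    }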
index 81f5464..34b84bc 100644 (file)
@@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC
 config CC_HAS_KASAN_SW_TAGS
        def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+       def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
 config KASAN
        bool "KASAN: runtime memory debugger"
        depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
                   (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
        depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        help
          Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
          designed to find out-of-bounds accesses and use-after-free bugs.
index ffa7a76..256f248 100644 (file)
@@ -3,6 +3,11 @@
 config HAVE_ARCH_KGDB
        bool
 
+# set if the architecture has its own kgdb_arch_handle_qxfer_pkt
+# function to enable the gdb stub to handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+       bool
+
 menuconfig KGDB
        bool "KGDB: kernel debugger"
        depends on HAVE_ARCH_KGDB
index 50d1e9f..6ed72dc 100644 (file)
@@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
  * @endbit: The index (in logical notation, compensated for quirks) where
  *         the packed value ends within pbuf. Must be smaller than, or equal
  *         to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
  * @op: If PACK, then uval will be treated as const pointer and copied (packed)
  *     into pbuf, between startbit and endbit.
  *     If UNPACK, then pbuf will be treated as const pointer and the logical
index 2852828..a2a8226 100644 (file)
@@ -520,8 +520,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 err_free:
        kfree(devmem);
 err_release:
-       release_mem_region(devmem->pagemap.res.start,
-                          resource_size(&devmem->pagemap.res));
+       release_mem_region(res->start, resource_size(res));
 err:
        mutex_unlock(&mdevice->devmem_lock);
        return false;
index 0463ad2..26ecff8 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range_nid(size, alignment,
-                                       highmem_start, limit, nid, false);
+                                       highmem_start, limit, nid, true);
                        limit = highmem_start;
                }
 
                if (!addr) {
                        addr = memblock_alloc_range_nid(size, alignment, base,
-                                       limit, nid, false);
+                                       limit, nid, true);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
index fd988b7..8637560 100644 (file)
@@ -2316,15 +2316,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
                .page = NULL,
        };
 
-       current->capture_control = &capc;
+       /*
+        * Make sure the structs are really initialized before we expose the
+        * capture control, in case we are interrupted and the interrupt handler
+        * frees a page.
+        */
+       barrier();
+       WRITE_ONCE(current->capture_control, &capc);
 
        ret = compact_zone(&cc, &capc);
 
        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));
 
-       *capture = capc.page;
-       current->capture_control = NULL;
+       /*
+        * Make sure we hide capture control first before we read the captured
+        * page pointer, otherwise an interrupt could free and capture a page
+        * and we would leak it.
+        */
+       WRITE_ONCE(current->capture_control, NULL);
+       *capture = READ_ONCE(capc.page);
 
        return ret;
 }
index e456230..61ab16f 100644 (file)
@@ -246,13 +246,13 @@ static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
 static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
                                   unsigned long vaddr)
 {
-       pte_t pte = READ_ONCE(*ptep);
+       pte_t pte = ptep_get(ptep);
 
        pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
        set_pte_at(mm, vaddr, ptep, pte);
        barrier();
        pte_clear(mm, vaddr, ptep);
-       pte = READ_ONCE(*ptep);
+       pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));
 }
 
index f0ae9a6..385759c 100644 (file)
@@ -2028,7 +2028,7 @@ find_page:
 
                page = find_get_page(mapping, index);
                if (!page) {
-                       if (iocb->ki_flags & IOCB_NOWAIT)
+                       if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
                                goto would_block;
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
@@ -2038,6 +2038,10 @@ find_page:
                                goto no_cached_page;
                }
                if (PageReadahead(page)) {
+                       if (iocb->ki_flags & IOCB_NOIO) {
+                               put_page(page);
+                               goto out;
+                       }
                        page_cache_async_readahead(mapping,
                                        ra, filp, page,
                                        index, last_index - index);
@@ -2160,6 +2164,11 @@ page_not_up_to_date_locked:
                }
 
 readpage:
+               if (iocb->ki_flags & IOCB_NOIO) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto would_block;
+               }
                /*
                 * A previous I/O error may have been due to temporary
                 * failures, eg. multipath errors.
@@ -2249,9 +2258,19 @@ EXPORT_SYMBOL_GPL(generic_file_buffered_read);
  *
  * This is the "read_iter()" routine for all filesystems
  * that can use the page cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
+ * be returned when no data can be read without waiting for I/O requests
+ * to complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
+ * requests shall be made for the read or for readahead.  When no data
+ * can be read, -EAGAIN shall be returned.  When readahead would be
+ * triggered, a partial, possibly empty read shall be returned.
+ *
  * Return:
  * * number of bytes copied, even for partial reads
- * * negative error code if nothing was read
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
  */
 ssize_t
 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
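For illustration only: a hypothetical in-kernel caller that uses the new IOCB_NOIO flag to read only what is already in the page cache; per the documentation above, a zero or short count (or -EAGAIN) indicates the data was not fully cached. The helper name is invented for the example.

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Read from the page cache without starting any I/O or readahead. */
    static ssize_t read_cached_only(struct file *file, void *buf, size_t len,
                                    loff_t pos)
    {
            struct kiocb kiocb;
            struct iov_iter iter;
            struct kvec kv = { .iov_base = buf, .iov_len = len };

            init_sync_kiocb(&kiocb, file);
            kiocb.ki_pos = pos;
            kiocb.ki_flags |= IOCB_NOIO;
            iov_iter_kvec(&iter, READ, &kv, 1, len);

            return generic_file_read_iter(&kiocb, &iter);
    }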
index 57ece74..fab4485 100644 (file)
@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
 
        /* Use first found vma */
        pgoff_start = page_to_pgoff(hpage);
-       pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+       pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
index 0b38b6a..1962232 100644 (file)
@@ -2772,8 +2772,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
                return;
 
        cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-       if (!cw)
+       if (!cw) {
+               css_put(&memcg->css);
                return;
+       }
 
        cw->memcg = memcg;
        cw->cachep = cachep;
@@ -6360,11 +6362,16 @@ static unsigned long effective_protection(unsigned long usage,
         * We're using unprotected memory for the weight so that if
         * some cgroups DO claim explicit protection, we don't protect
         * the same bytes twice.
+        *
+        * Check both usage and parent_usage against the respective
+        * protected values. One should imply the other, but they
+        * aren't read atomically - make sure the division is sane.
         */
        if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
                return ep;
-
-       if (parent_effective > siblings_protected && usage > protected) {
+       if (parent_effective > siblings_protected &&
+           parent_usage > siblings_protected &&
+           usage > protected) {
                unsigned long unclaimed;
 
                unclaimed = parent_effective - siblings_protected;
@@ -6416,7 +6423,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 
        if (parent == root) {
                memcg->memory.emin = READ_ONCE(memcg->memory.min);
-               memcg->memory.elow = memcg->memory.low;
+               memcg->memory.elow = READ_ONCE(memcg->memory.low);
                goto out;
        }
 
@@ -6428,7 +6435,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                        atomic_long_read(&parent->memory.children_min_usage)));
 
        WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
-                       memcg->memory.low, READ_ONCE(parent->memory.elow),
+                       READ_ONCE(memcg->memory.low),
+                       READ_ONCE(parent->memory.elow),
                        atomic_long_read(&parent->memory.children_low_usage)));
 
 out:
index dc7f354..87ec87c 100644 (file)
@@ -1498,7 +1498,7 @@ out:
 }
 
 #ifdef pte_index
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
                        unsigned long addr, struct page *page, pgprot_t prot)
 {
        int err;
@@ -1506,8 +1506,9 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
        if (!page_count(page))
                return -EINVAL;
        err = validate_page_before_insert(page);
-       return err ? err : insert_page_into_pte_locked(
-               mm, pte_offset_map(pmd, addr), addr, page, prot);
+       if (err)
+               return err;
+       return insert_page_into_pte_locked(mm, pte, addr, page, prot);
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
@@ -1517,7 +1518,8 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num, pgprot_t prot)
 {
        pmd_t *pmd = NULL;
-       spinlock_t *pte_lock = NULL;
+       pte_t *start_pte, *pte;
+       spinlock_t *pte_lock;
        struct mm_struct *const mm = vma->vm_mm;
        unsigned long curr_page_idx = 0;
        unsigned long remaining_pages_total = *num;
@@ -1536,18 +1538,17 @@ more:
        ret = -ENOMEM;
        if (pte_alloc(mm, pmd))
                goto out;
-       pte_lock = pte_lockptr(mm, pmd);
 
        while (pages_to_write_in_pmd) {
                int pte_idx = 0;
                const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
 
-               spin_lock(pte_lock);
-               for (; pte_idx < batch_size; ++pte_idx) {
-                       int err = insert_page_in_batch_locked(mm, pmd,
+               start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+               for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+                       int err = insert_page_in_batch_locked(mm, pte,
                                addr, pages[curr_page_idx], prot);
                        if (unlikely(err)) {
-                               spin_unlock(pte_lock);
+                               pte_unmap_unlock(start_pte, pte_lock);
                                ret = err;
                                remaining_pages_total -= pte_idx;
                                goto out;
@@ -1555,7 +1556,7 @@ more:
                        addr += PAGE_SIZE;
                        ++curr_page_idx;
                }
-               spin_unlock(pte_lock);
+               pte_unmap_unlock(start_pte, pte_lock);
                pages_to_write_in_pmd -= batch_size;
                remaining_pages_total -= batch_size;
        }
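
The restructured loop above maps and locks the PTE page once per batch of up to eight pages with pte_offset_map_lock(), walks a pte cursor forward for each insertion, and releases with pte_unmap_unlock(), instead of re-deriving the PTE for every page inside the locked region. A rough userspace model of that acquire-once-per-batch shape, with a mutex standing in for the page-table lock (names and sizes are illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    #define NPAGES     20
    #define BATCH_SIZE  8   /* mirrors min_t(int, pages_to_write_in_pmd, 8) */

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static long page_table[NPAGES];          /* stands in for one PTE page */

    int main(void)
    {
            int remaining = NPAGES, idx = 0;

            while (remaining) {
                    int batch = remaining < BATCH_SIZE ? remaining : BATCH_SIZE;
                    long *pte;

                    /* "map and lock" once per batch, like pte_offset_map_lock() */
                    pthread_mutex_lock(&table_lock);
                    for (pte = &page_table[idx]; batch; ++pte, --batch, ++idx)
                            *pte = idx;      /* one insertion via the cursor */
                    pthread_mutex_unlock(&table_lock);  /* pte_unmap_unlock() */

                    remaining = NPAGES - idx;
            }

            printf("inserted %d entries\n", idx);
            return 0;
    }
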
@@ -3140,8 +3141,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                err = mem_cgroup_charge(page, vma->vm_mm,
                                                        GFP_KERNEL);
                                ClearPageSwapCache(page);
-                               if (err)
+                               if (err) {
+                                       ret = VM_FAULT_OOM;
                                        goto out_page;
+                               }
+
+                               /*
+                                * XXX: Move to lru_cache_add() when it
+                                * supports new vs putback
+                                */
+                               spin_lock_irq(&page_pgdat(page)->lru_lock);
+                               lru_note_cost_page(page);
+                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
index 9b34e03..da374cd 100644 (file)
@@ -471,11 +471,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
                                      unsigned long start_pfn,
                                      unsigned long nr_pages)
 {
+       const unsigned long end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
-       unsigned long flags;
+       unsigned long pfn, cur_nr_pages, flags;
 
        /* Poison struct pages because they are now uninitialized again. */
-       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+       for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+               cond_resched();
+
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages =
+                       min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+               page_init_poison(pfn_to_page(pfn),
+                                sizeof(struct page) * cur_nr_pages);
+       }
 
 #ifdef CONFIG_ZONE_DEVICE
        /*
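
The chunking expression above, min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn), advances either to the end of the range or to the next memory-section boundary, whichever comes first, so cond_resched() gets a chance between sections. A standalone sketch of that arithmetic, assuming 32768 pages per section purely for illustration (the real PAGES_PER_SECTION is architecture and config dependent):

    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL          /* illustrative value only */
    #define SECTION_ALIGN_UP(pfn) \
            (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long start_pfn = 40000, nr_pages = 100000;
            unsigned long end_pfn = start_pfn + nr_pages;
            unsigned long pfn, cur_nr_pages;

            for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
                    /* all remaining pages up to the next section boundary */
                    cur_nr_pages = min_ul(end_pfn - pfn,
                                          SECTION_ALIGN_UP(pfn + 1) - pfn);
                    printf("poison pfns [%lu, %lu)\n", pfn, pfn + cur_nr_pages);
            }
            return 0;
    }
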
index f377296..40cd701 100644 (file)
@@ -1161,21 +1161,10 @@ out:
 }
 
 /*
- * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
- * around it.
- */
-#if defined(CONFIG_ARM) && \
-       defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
-#define ICE_noinline noinline
-#else
-#define ICE_noinline
-#endif
-
-/*
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+static int unmap_and_move(new_page_t get_new_page,
                                   free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
index 5dd572d..6b153dc 100644 (file)
@@ -206,9 +206,28 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * The destination pmd shouldn't be established, free_pgtables()
-        * should have release it.
+        * should have released it.
+        *
+        * However, there's a case during execve() where we use mremap
+        * to move the initial stack, and in that case the target area
+        * may overlap the source area (always moving down).
+        *
+        * If everything is PMD-aligned, that works fine, as moving
+        * each pmd down will clear the source pmd. But if we first
+        * have a few 4kB-only pages that get moved down, and then
+        * hit the "now the rest is PMD-aligned, let's do everything
+        * one pmd at a time", we will still have the old (now empty
+        * of any 4kB pages, but still there) PMD in the page table
+        * tree.
+        *
+        * Warn on it once - because we really should try to figure
+        * out how to do this better - but then say "I won't move
+        * this pmd".
+        *
+        * One alternative might be to just unmap the target pmd at
+        * this point, and verify that it really is empty. We'll see.
         */
-       if (WARN_ON(!pmd_none(*new_pmd)))
+       if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
                return false;
 
        /*
index cdcad5d..f32a690 100644 (file)
@@ -291,23 +291,6 @@ void *vzalloc_node(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- *     vmalloc_exec  -  allocate virtually contiguous, executable memory
- *     @size:          allocation size
- *
- *     Kernel-internal function to allocate enough pages to cover @size
- *     the page level allocator and map them into contiguous and
- *     executable kernel virtual space.
- *
- *     For tight control over page level allocator and protection flags
- *     use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
-}
-
-/**
  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
  *     @size:          allocation size
  *
index 48eb0f1..e028b87 100644 (file)
@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void)
  * Initialise min_free_kbytes.
  *
  * For small machines we want it small (128k min).  For large machines
- * we want it large (64MB max).  But it is not linear, because network
+ * we want it large (256MB max).  But it is not linear, because network
  * bandwidth does not increase linearly with machine size.  We use
  *
  *     min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
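
Plugging numbers into the formula in the comment makes the corrected bound easier to check; the sketch below applies min_free_kbytes = 4 * sqrt(lowmem_kbytes) with the 128 kB floor and 256 MB ceiling the comment states (the kernel's actual clamping code is not part of this hunk). Link with -lm:

    #include <math.h>
    #include <stdio.h>

    /* Heuristic from the comment above: 4 * sqrt(lowmem_kbytes), clamped to
     * the 128 kB floor and 256 MB (262144 kB) ceiling the comment mentions. */
    static unsigned long min_free_kbytes(unsigned long lowmem_kbytes)
    {
            unsigned long v = (unsigned long)(4 * sqrt((double)lowmem_kbytes));

            if (v < 128)
                    v = 128;
            if (v > 256UL * 1024)
                    v = 256UL * 1024;
            return v;
    }

    int main(void)
    {
            unsigned long kb[] = { 64UL << 10, 8UL << 20, 1UL << 30 }; /* 64MB, 8GB, 1TB */

            for (int i = 0; i < 3; i++)
                    printf("lowmem %10lu kB -> min_free_kbytes %lu\n",
                           kb[i], min_free_kbytes(kb[i]));
            return 0;
    }
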
index 207c83e..74f7e09 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        int ret;
@@ -388,7 +388,7 @@ out:
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
 
index 9e72ba2..37d48a5 100644 (file)
@@ -1726,7 +1726,7 @@ void kzfree(const void *p)
        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
-       memset(mem, 0, ks);
+       memzero_explicit(mem, ks);
        kfree(mem);
 }
 EXPORT_SYMBOL(kzfree);
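
The point of switching kzfree() to memzero_explicit() is that a memset() immediately followed by kfree() is a dead store the compiler may legally optimize away, leaving the sensitive bytes in memory; memzero_explicit() is essentially memset() plus a barrier that forces the store to be emitted. A hedged userspace analogue of that pattern (the empty GCC/Clang asm below mirrors the idea and is not the kernel implementation verbatim):

    #include <stdio.h>
    #include <string.h>

    /* Zero a buffer in a way the optimizer cannot drop: the empty asm tells
     * the compiler the memory behind p is still used, so the memset above it
     * must actually be emitted (GCC/Clang specific). */
    static void explicit_zero(void *p, size_t len)
    {
            memset(p, 0, len);
            __asm__ __volatile__("" : : "r"(p) : "memory");
    }

    int main(void)
    {
            char secret[32] = "hunter2";

            /* ... use secret ... */
            explicit_zero(secret, sizeof(secret));  /* survives optimization */
            printf("first byte after wipe: %d\n", secret[0]);
            return 0;
    }
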
index fe81773..ef30307 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3766,15 +3766,13 @@ error:
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                             const char *text, unsigned long *map)
+                             const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
+       unsigned long *map;
        void *p;
 
-       if (!map)
-               return;
-
        slab_err(s, page, text, s->name);
        slab_lock(page);
 
@@ -3786,6 +3784,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                        print_tracking(s, p);
                }
        }
+       put_map(map);
        slab_unlock(page);
 #endif
 }
@@ -3799,11 +3798,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
        LIST_HEAD(discard);
        struct page *page, *h;
-       unsigned long *map = NULL;
-
-#ifdef CONFIG_SLUB_DEBUG
-       map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-#endif
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
@@ -3813,16 +3807,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        list_add(&page->slab_list, &discard);
                } else {
                        list_slab_objects(s, page,
-                         "Objects remaining in %s on __kmem_cache_shutdown()",
-                         map);
+                         "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
 
-#ifdef CONFIG_SLUB_DEBUG
-       bitmap_free(map);
-#endif
-
        list_for_each_entry_safe(page, h, &discard, slab_list)
                discard_slab(s, page);
 }
index dbcab84..a82efc3 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -443,8 +443,7 @@ void mark_page_accessed(struct page *page)
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
-               if (page_is_file_lru(page))
-                       workingset_activation(page);
+               workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
index e98ff46..05889e8 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap_slots.h>
 #include <linux/huge_mm.h>
-
+#include "internal.h"
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
@@ -429,7 +429,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
index 3091c2c..5a2b55c 100644 (file)
@@ -1862,7 +1862,6 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @pages: an array of pointers to the pages to be mapped
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
- * @prot: memory protection to use. PAGE_KERNEL for regular RAM
  *
  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
  * faster than vmap so it's good.  But if you mix long-life and short-life
@@ -2696,26 +2695,6 @@ void *vzalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vzalloc_node);
 
-/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size:        allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-                       NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
index b6d8432..749d239 100644 (file)
@@ -904,6 +904,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                __delete_from_swap_cache(page, swap);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
+               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;
@@ -1884,6 +1885,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                                list_add(&page->lru, &pages_to_free);
                } else {
                        nr_moved += nr_pages;
+                       if (PageActive(page))
+                               workingset_age_nonresident(lruvec, nr_pages);
                }
        }
 
index d481ea4..50b7937 100644 (file)
  *
  *             Implementation
  *
- * For each node's file LRU lists, a counter for inactive evictions
- * and activations is maintained (node->inactive_age).
+ * For each node's LRU lists, a counter for inactive evictions and
+ * activations is maintained (node->nonresident_age).
  *
  * On eviction, a snapshot of this counter (along with some bits to
  * identify the node) is stored in the now empty page cache
@@ -213,7 +213,17 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
        *workingsetp = workingset;
 }
 
-static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
+/**
+ * workingset_age_nonresident - age non-resident entries as LRU ages
+ * @lruvec: the lruvec that was aged
+ * @nr_pages: the number of pages to count
+ *
+ * As in-memory pages are aged, non-resident pages need to be aged as
+ * well, in order for the refault distances later on to be comparable
+ * to the in-memory dimensions. This function allows reclaim and LRU
+ * operations to drive the non-resident aging along in parallel.
+ */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
 {
        /*
         * Reclaiming a cgroup means reclaiming all its children in a
@@ -227,11 +237,8 @@ static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
         * the root cgroup's, age as well.
         */
        do {
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_lruvec(memcg, pgdat);
-               atomic_long_inc(&lruvec->inactive_age);
-       } while (memcg && (memcg = parent_mem_cgroup(memcg)));
+               atomic_long_add(nr_pages, &lruvec->nonresident_age);
+       } while ((lruvec = parent_lruvec(lruvec)));
 }
 
 /**
@@ -254,12 +261,11 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       advance_inactive_age(page_memcg(page), pgdat);
-
        lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        /* XXX: target_memcg can be NULL, go through lruvec */
        memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
-       eviction = atomic_long_read(&lruvec->inactive_age);
+       eviction = atomic_long_read(&lruvec->nonresident_age);
        return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
@@ -309,20 +315,20 @@ void workingset_refault(struct page *page, void *shadow)
        if (!mem_cgroup_disabled() && !eviction_memcg)
                goto out;
        eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
-       refault = atomic_long_read(&eviction_lruvec->inactive_age);
+       refault = atomic_long_read(&eviction_lruvec->nonresident_age);
 
        /*
         * Calculate the refault distance
         *
         * The unsigned subtraction here gives an accurate distance
-        * across inactive_age overflows in most cases. There is a
+        * across nonresident_age overflows in most cases. There is a
         * special case: usually, shadow entries have a short lifetime
         * and are either refaulted or reclaimed along with the inode
         * before they get too old.  But it is not impossible for the
-        * inactive_age to lap a shadow entry in the field, which can
-        * then result in a false small refault distance, leading to a
-        * false activation should this old entry actually refault
-        * again.  However, earlier kernels used to deactivate
+        * nonresident_age to lap a shadow entry in the field, which
+        * can then result in a false small refault distance, leading
+        * to a false activation should this old entry actually
+        * refault again.  However, earlier kernels used to deactivate
         * unconditionally with *every* reclaim invocation for the
         * longest time, so the occasional inappropriate activation
         * leading to pressure on the active list is not a problem.
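
The comment above relies on a property of unsigned arithmetic: the eviction snapshot packed into the shadow entry is subtracted from the current counter, and the result stays correct across a counter wrap as long as the counter has not lapped the entry completely. A small demonstration using a deliberately narrow 16-bit counter so the wraparound is easy to see (the real nonresident_age is an atomic_long_t):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Narrow 16-bit counter for demonstration only. */
            uint16_t eviction = 65000;  /* snapshot stored in the shadow entry */
            uint16_t refault  = 1500;   /* counter has since wrapped past 0   */

            /* Unsigned subtraction wraps modulo 2^16, so this is the true
             * number of aging events between the two snapshots: */
            uint16_t distance = (uint16_t)(refault - eviction);

            printf("distance = %u (expected %u)\n",
                   distance, 65536 - 65000 + 1500);
            return 0;
    }
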
@@ -359,7 +365,7 @@ void workingset_refault(struct page *page, void *shadow)
                goto out;
 
        SetPageActive(page);
-       advance_inactive_age(memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
 
        /* Page was active prior to eviction */
@@ -382,6 +388,7 @@ out:
 void workingset_activation(struct page *page)
 {
        struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
 
        rcu_read_lock();
        /*
@@ -394,7 +401,8 @@ void workingset_activation(struct page *page)
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
-       advance_inactive_age(memcg, page_pgdat(page));
+       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
 out:
        rcu_read_unlock();
 }
index c8d6a07..3dd7c97 100644 (file)
@@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
        lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
 }
 
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &vlan_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
 }
 
@@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, dev->lower_level);
+       vlan_dev_set_lockdep_class(dev);
 
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
index c1b6242..5126566 100644 (file)
@@ -189,3 +189,4 @@ MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
 MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
 MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
index bfd4ccd..b03c469 100644 (file)
@@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
        return a + (long)b + c + d + (long)e + f;
 }
 
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg;
+}
+
+int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg->a;
+}
+
 int noinline bpf_modify_return_test(int a, int *b)
 {
        *b += 1;
@@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
 {
+       struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;
@@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
-                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
+                   bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
+                   bpf_fentry_test8(&arg) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
index c0f0990..1905e01 100644 (file)
@@ -50,7 +50,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
        req.len = optlen;
        if (!bpfilter_ops.info.pid)
                goto out;
-       n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+       n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
                           &pos);
        if (n != sizeof(req)) {
                pr_err("write fail %zd\n", n);
index 24986ec..90592af 100644 (file)
@@ -86,7 +86,7 @@ static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
 {
        struct ethhdr *eth_hdr;
        struct sk_buff *skb;
-       u16 *version;
+       __be16 *version;
 
        skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
        if (!skb)
@@ -411,10 +411,16 @@ int br_mrp_set_port_role(struct net_bridge_port *p,
        if (!mrp)
                return -EINVAL;
 
-       if (role == BR_MRP_PORT_ROLE_PRIMARY)
+       switch (role) {
+       case BR_MRP_PORT_ROLE_PRIMARY:
                rcu_assign_pointer(mrp->p_port, p);
-       else
+               break;
+       case BR_MRP_PORT_ROLE_SECONDARY:
                rcu_assign_pointer(mrp->s_port, p);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        br_mrp_port_switchdev_set_role(p, role);
 
index 83490bf..4c4a93a 100644 (file)
@@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
 
                if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
-                   nsrcs_offset + sizeof(_nsrcs))
+                   nsrcs_offset + sizeof(__nsrcs))
                        return -EINVAL;
 
                _nsrcs = skb_header_pointer(skb, nsrcs_offset,
index 7501be4..e0ea6db 100644 (file)
@@ -217,8 +217,8 @@ struct net_bridge_port_group {
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct br_ip                    addr;
+       unsigned char                   eth_addr[ETH_ALEN] __aligned(2);
        unsigned char                   flags;
-       unsigned char                   eth_addr[ETH_ALEN];
 };
 
 struct net_bridge_mdb_entry {
@@ -430,7 +430,7 @@ struct net_bridge {
        struct hlist_head               fdb_list;
 
 #if IS_ENABLED(CONFIG_BRIDGE_MRP)
-       struct list_head                __rcu mrp_list;
+       struct list_head                mrp_list;
 #endif
 };
 
index 33b255e..315eb37 100644 (file)
@@ -8,7 +8,7 @@
 
 struct br_mrp {
        /* list of mrp instances */
-       struct list_head                __rcu list;
+       struct list_head                list;
 
        struct net_bridge_port __rcu    *p_port;
        struct net_bridge_port __rcu    *s_port;
index 7c9e92b..8e8ffac 100644 (file)
@@ -155,3 +155,4 @@ module_exit(nft_meta_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("wenxu <wenxu@ucloud.cn>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
+MODULE_DESCRIPTION("Support for bridge dedicated meta key");
index f48cf4c..deae2c9 100644 (file)
@@ -455,3 +455,4 @@ module_exit(nft_reject_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
+MODULE_DESCRIPTION("Reject packets from bridge via nftables");
index 6bc2388..90b59fc 100644 (file)
@@ -4192,10 +4192,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 
        local_bh_disable();
 
+       dev_xmit_recursion_inc();
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
                ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);
+       dev_xmit_recursion_dec();
 
        local_bh_enable();
 
@@ -9547,6 +9549,13 @@ int register_netdevice(struct net_device *dev)
                rcu_barrier();
 
                dev->reg_state = NETREG_UNREGISTERED;
+               /* We should put the kobject that is held after
+                * netdev_unregister_kobject(); otherwise the
+                * net device cannot be freed when the driver
+                * calls free_netdev(), because the kobject is
+                * still being held.
+                */
+               kobject_put(&dev->dev.kobj);
        }
        /*
         *      Prevent userspace races by waiting until the network
index 6393ba9..54cd568 100644 (file)
@@ -690,6 +690,15 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* netif_addr_lock_bh() uses lockdep subclass 0, which is okay for two
+        * reasons:
+        * 1) This is always called without any addr_list_lock, so as the
+        *    outermost one here, it must be 0.
+        * 2) This is called by some callers after unlinking the upper device,
+        *    so the dev->lower_level becomes 1 again.
+        * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
+        * larger.
+        */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
@@ -911,6 +920,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* See the above comments inside dev_uc_unsync(). */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
index 2ee7bc4..b09bebe 100644 (file)
@@ -1721,3 +1721,4 @@ module_exit(exit_net_drop_monitor);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
 MODULE_ALIAS_GENL_FAMILY("NET_DM");
+MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
index 7339538..82e1b5b 100644 (file)
@@ -5853,12 +5853,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
 
-       if (skb->protocol == cpu_to_be16(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case cpu_to_be16(ETH_P_IP):
                iphdr_len = sizeof(struct iphdr);
-       else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+               break;
+       case cpu_to_be16(ETH_P_IPV6):
                iphdr_len = sizeof(struct ipv6hdr);
-       else
+               break;
+       default:
                return 0;
+       }
 
        if (skb_headlen(skb) < iphdr_len)
                return 0;
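
The switch above just picks the fixed length of the network header to sanity-check against: 20 bytes for an IPv4 header and 40 bytes for an IPv6 header, with everything else ignored; using skb_protocol(skb, true) rather than skb->protocol presumably lets VLAN-tagged traffic be classified by its inner protocol. A quick standalone check of those two sizes using the userspace struct definitions (struct ip and struct ip6_hdr stand in for the kernel's iphdr/ipv6hdr):

    #include <netinet/ip.h>    /* struct ip      (IPv4 header, 20 bytes) */
    #include <netinet/ip6.h>   /* struct ip6_hdr (IPv6 header, 40 bytes) */
    #include <stdio.h>

    static unsigned int l3_header_len(unsigned int ethertype)
    {
            switch (ethertype) {
            case 0x0800: return sizeof(struct ip);       /* ETH_P_IP   */
            case 0x86DD: return sizeof(struct ip6_hdr);  /* ETH_P_IPV6 */
            default:     return 0;                       /* not IP */
            }
    }

    int main(void)
    {
            printf("IPv4 header: %u bytes\n", l3_header_len(0x0800));  /* 20 */
            printf("IPv6 header: %u bytes\n", l3_header_len(0x86DD));  /* 40 */
            printf("other:       %u bytes\n", l3_header_len(0x0806));  /* 0  */
            return 0;
    }
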
index d02df0b..142a882 100644 (file)
@@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog)
 {
        enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
-       struct bpf_prog *attached;
 
        if (net == &init_net) {
                /* BPF flow dissector in the root namespace overrides
@@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
                for_each_net(ns) {
                        if (ns == &init_net)
                                continue;
-                       if (rcu_access_pointer(ns->bpf.progs[type]))
+                       if (rcu_access_pointer(ns->bpf.run_array[type]))
                                return -EEXIST;
                }
        } else {
                /* Make sure root flow dissector is not attached
                 * when attaching to the non-root namespace.
                 */
-               if (rcu_access_pointer(init_net.bpf.progs[type]))
+               if (rcu_access_pointer(init_net.bpf.run_array[type]))
                        return -EEXIST;
        }
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (attached == prog)
-               /* The same program cannot be attached twice */
-               return -EINVAL;
-
-       rcu_assign_pointer(net->bpf.progs[type], prog);
-       if (attached)
-               bpf_prog_put(attached);
        return 0;
 }
 #endif /* CONFIG_BPF_SYSCALL */
@@ -903,7 +894,6 @@ bool __skb_flow_dissect(const struct net *net,
        struct flow_dissector_key_addrs *key_addrs;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
-       struct bpf_prog *attached = NULL;
        enum flow_dissect_ret fdret;
        enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
        bool mpls_el = false;
@@ -960,14 +950,14 @@ bool __skb_flow_dissect(const struct net *net,
        WARN_ON_ONCE(!net);
        if (net) {
                enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+               struct bpf_prog_array *run_array;
 
                rcu_read_lock();
-               attached = rcu_dereference(init_net.bpf.progs[type]);
-
-               if (!attached)
-                       attached = rcu_dereference(net->bpf.progs[type]);
+               run_array = rcu_dereference(init_net.bpf.run_array[type]);
+               if (!run_array)
+                       run_array = rcu_dereference(net->bpf.run_array[type]);
 
-               if (attached) {
+               if (run_array) {
                        struct bpf_flow_keys flow_keys;
                        struct bpf_flow_dissector ctx = {
                                .flow_keys = &flow_keys,
@@ -975,6 +965,7 @@ bool __skb_flow_dissect(const struct net *net,
                                .data_end = data + hlen,
                        };
                        __be16 n_proto = proto;
+                       struct bpf_prog *prog;
 
                        if (skb) {
                                ctx.skb = skb;
@@ -985,7 +976,8 @@ bool __skb_flow_dissect(const struct net *net,
                                n_proto = skb->protocol;
                        }
 
-                       ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+                       prog = READ_ONCE(run_array->items[0].prog);
+                       ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
                                               hlen, flags);
                        __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
                                                 target_container);
index 0cfc35e..b739cfa 100644 (file)
@@ -372,14 +372,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 }
 EXPORT_SYMBOL(flow_indr_dev_register);
 
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+                                     void *cb_priv,
                                      struct list_head *cleanup_list)
 {
        struct flow_block_cb *this, *next;
 
        list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
-               if (this->cb == setup_cb &&
-                   this->cb_priv == cb_priv) {
+               if (this->release == release &&
+                   this->indr.cb_priv == cb_priv) {
                        list_move(&this->indr.list, cleanup_list);
                        return;
                }
@@ -397,7 +398,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
 }
 
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb)
+                             void (*release)(void *cb_priv))
 {
        struct flow_indr_dev *this, *next, *indr_dev = NULL;
        LIST_HEAD(cleanup_list);
@@ -418,7 +419,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                return;
        }
 
-       __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+       __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
        mutex_unlock(&flow_indr_block_lock);
 
        flow_block_indr_notify(&cleanup_list);
@@ -429,32 +430,37 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
 static void flow_block_indr_init(struct flow_block_cb *flow_block,
                                 struct flow_block_offload *bo,
                                 struct net_device *dev, void *data,
+                                void *cb_priv,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
 {
        flow_block->indr.binder_type = bo->binder_type;
        flow_block->indr.data = data;
+       flow_block->indr.cb_priv = cb_priv;
        flow_block->indr.dev = dev;
        flow_block->indr.cleanup = cleanup;
 }
 
-static void __flow_block_indr_binding(struct flow_block_offload *bo,
-                                     struct net_device *dev, void *data,
-                                     void (*cleanup)(struct flow_block_cb *block_cb))
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct flow_block_cb *block_cb;
 
-       list_for_each_entry(block_cb, &bo->cb_list, list) {
-               switch (bo->command) {
-               case FLOW_BLOCK_BIND:
-                       flow_block_indr_init(block_cb, bo, dev, data, cleanup);
-                       list_add(&block_cb->indr.list, &flow_block_indr_list);
-                       break;
-               case FLOW_BLOCK_UNBIND:
-                       list_del(&block_cb->indr.list);
-                       break;
-               }
-       }
+       block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
+       if (IS_ERR(block_cb))
+               goto out;
+
+       flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
+       list_add(&block_cb->indr.list, &flow_block_indr_list);
+
+out:
+       return block_cb;
 }
+EXPORT_SYMBOL(flow_indr_block_cb_alloc);
 
 int flow_indr_dev_setup_offload(struct net_device *dev,
                                enum tc_setup_type type, void *data,
@@ -465,9 +471,8 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
 
        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry(this, &flow_block_indr_dev_list, list)
-               this->cb(dev, this->cb_priv, type, bo);
+               this->cb(dev, this->cb_priv, type, bo, data, cleanup);
 
-       __flow_block_indr_binding(bo, dev, data, cleanup);
        mutex_unlock(&flow_indr_block_lock);
 
        return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
index 351afbf..6a32a1f 100644 (file)
@@ -683,7 +683,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
        return container_of(parser, struct sk_psock, parser);
 }
 
-static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
+static void sk_psock_skb_redirect(struct sk_buff *skb)
 {
        struct sk_psock *psock_other;
        struct sock *sk_other;
@@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
        }
 }
 
-static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
-                                      struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
@@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
+       sk_psock_tls_verdict_apply(skb, ret);
        rcu_read_unlock();
-       sk_psock_tls_verdict_apply(psock, skb, ret);
        return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
@@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
                }
                goto out_free;
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
                /* fall-through */
@@ -782,11 +781,18 @@ out_free:
 
 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 {
-       struct sk_psock *psock = sk_psock_from_strp(strp);
+       struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
+       struct sock *sk;
 
        rcu_read_lock();
+       sk = strp->sk;
+       psock = sk_psock(sk);
+       if (unlikely(!psock)) {
+               kfree_skb(skb);
+               goto out;
+       }
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_orphan(skb);
@@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
-       rcu_read_unlock();
        sk_psock_verdict_apply(psock, skb, ret);
+out:
+       rcu_read_unlock();
 }
 
 static int sk_psock_strp_read_done(struct strparser *strp, int err)
index 6c4acf1..2e5b787 100644 (file)
@@ -718,7 +718,7 @@ bool sk_mc_loop(struct sock *sk)
                return inet6_sk(sk)->mc_loop;
 #endif
        }
-       WARN_ON(1);
+       WARN_ON_ONCE(1);
        return true;
 }
 EXPORT_SYMBOL(sk_mc_loop);
@@ -1767,6 +1767,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                cgroup_sk_alloc(&sk->sk_cgrp_data);
                sock_update_classid(&sk->sk_cgrp_data);
                sock_update_netprioidx(&sk->sk_cgrp_data);
+               sk_tx_queue_clear(sk);
        }
 
        return sk;
@@ -1925,7 +1926,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                /* sk->sk_memcg will be populated at accept() time */
                newsk->sk_memcg = NULL;
 
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+               cgroup_sk_clone(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
                filter = rcu_dereference(sk->sk_filter);
@@ -1990,6 +1991,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
+               sk_tx_queue_clear(newsk);
                RCU_INIT_POINTER(newsk->sk_wq, NULL);
 
                if (newsk->sk_prot->sockets_allocated)
index 4059f94..0971f17 100644 (file)
@@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
        struct fd f;
        int ret;
 
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-       ret = sock_map_prog_update(map, prog, attr->attach_type);
+       ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+       fdput(f);
+       return ret;
+}
+
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       u32 ufd = attr->target_fd;
+       struct bpf_prog *prog;
+       struct bpf_map *map;
+       struct fd f;
+       int ret;
+
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       prog = bpf_prog_get(attr->attach_bpf_fd);
+       if (IS_ERR(prog)) {
+               ret = PTR_ERR(prog);
+               goto put_map;
+       }
+
+       if (prog->type != ptype) {
+               ret = -EINVAL;
+               goto put_prog;
+       }
+
+       ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+put_prog:
+       bpf_prog_put(prog);
+put_map:
        fdput(f);
        return ret;
 }
@@ -1203,27 +1241,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
 }
 
 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
-                        u32 which)
+                        struct bpf_prog *old, u32 which)
 {
        struct sk_psock_progs *progs = sock_map_progs(map);
+       struct bpf_prog **pprog;
 
        if (!progs)
                return -EOPNOTSUPP;
 
        switch (which) {
        case BPF_SK_MSG_VERDICT:
-               psock_set_prog(&progs->msg_parser, prog);
+               pprog = &progs->msg_parser;
                break;
        case BPF_SK_SKB_STREAM_PARSER:
-               psock_set_prog(&progs->skb_parser, prog);
+               pprog = &progs->skb_parser;
                break;
        case BPF_SK_SKB_STREAM_VERDICT:
-               psock_set_prog(&progs->skb_verdict, prog);
+               pprog = &progs->skb_verdict;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       if (old)
+               return psock_replace_prog(pprog, prog, old);
+
+       psock_set_prog(pprog, prog);
        return 0;
 }
 
index f93f8ac..6ada114 100644 (file)
@@ -274,7 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
                if (jit_enable < 2 ||
-                   (jit_enable == 2 && bpf_dump_raw_ok())) {
+                   (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
                        *(int *)table->data = jit_enable;
                        if (jit_enable == 2)
                                pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
index 90f44f3..3c45f99 100644 (file)
@@ -462,6 +462,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
        xdpf->len = totsize - metasize;
        xdpf->headroom = 0;
        xdpf->metasize = metasize;
+       xdpf->frame_sz = PAGE_SIZE;
        xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
 
        xsk_buff_free(xdp);
index e8eaa80..d6200ff 100644 (file)
 #define DSA_HLEN       4
 #define EDSA_HLEN      8
 
+#define FRAME_TYPE_TO_CPU      0x00
+#define FRAME_TYPE_FORWARD     0x03
+
+#define TO_CPU_CODE_MGMT_TRAP          0x00
+#define TO_CPU_CODE_FRAME2REG          0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP      0x02
+#define TO_CPU_CODE_POLICY_TRAP                0x03
+#define TO_CPU_CODE_ARP_MIRROR         0x04
+#define TO_CPU_CODE_POLICY_MIRROR      0x05
+
 static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                                struct packet_type *pt)
 {
        u8 *edsa_header;
+       int frame_type;
+       int code;
        int source_device;
        int source_port;
 
@@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        /*
         * Check that frame type is either TO_CPU or FORWARD.
         */
-       if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+       frame_type = edsa_header[0] >> 6;
+
+       switch (frame_type) {
+       case FRAME_TYPE_TO_CPU:
+               code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+               /*
+                * Mark the frame to never egress on any port of the same switch
+                * unless it's a trapped IGMP/MLD packet, in which case the
+                * bridge might want to forward it.
+                */
+               if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+                       skb->offload_fwd_mark = 1;
+
+               break;
+
+       case FRAME_TYPE_FORWARD:
+               skb->offload_fwd_mark = 1;
+               break;
+
+       default:
                return NULL;
+       }
 
        /*
         * Determine source device and port.
@@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                        2 * ETH_ALEN);
        }
 
-       skb->offload_fwd_mark = 1;
-
        return skb;
 }
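
In the receive path above, the frame type is the top two bits of the first EDSA header byte, and for TO_CPU frames the CPU code is assembled from bits of bytes 1 and 2; only an IGMP/MLD trap leaves offload_fwd_mark clear so the bridge may still forward it. A worked extraction on invented header bytes so the shifts and masks are easy to follow:

    #include <stdio.h>

    #define FRAME_TYPE_TO_CPU         0x00
    #define FRAME_TYPE_FORWARD        0x03
    #define TO_CPU_CODE_IGMP_MLD_TRAP 0x02

    static void classify(const unsigned char hdr[4])
    {
            int frame_type = hdr[0] >> 6;                     /* top two bits */
            int code = (hdr[1] & 0x6) | ((hdr[2] >> 4) & 1);  /* 3-bit CPU code */

            if (frame_type == FRAME_TYPE_TO_CPU)
                    printf("TO_CPU, code=%d%s\n", code,
                           code == TO_CPU_CODE_IGMP_MLD_TRAP ?
                           " (IGMP/MLD trap: leave offload_fwd_mark clear)" : "");
            else if (frame_type == FRAME_TYPE_FORWARD)
                    printf("FORWARD: set offload_fwd_mark\n");
            else
                    printf("frame type %d: not handled\n", frame_type);
    }

    int main(void)
    {
            const unsigned char igmp_trap[4] = { 0x00, 0x02, 0x00, 0x00 }; /* invented */
            const unsigned char forward[4]   = { 0xc0, 0x00, 0x00, 0x00 }; /* invented */

            classify(igmp_trap);
            classify(forward);
            return 0;
    }
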
 
index 7b7a045..7194956 100644 (file)
@@ -234,6 +234,14 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
        int ret;
 
+       cfg->first = 100;
+       cfg->step = 100;
+       cfg->last = MAX_CABLE_LENGTH_CM;
+       cfg->pair = PHY_PAIR_ALL;
+
+       if (!nest)
+               return 0;
+
        ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
                               cable_test_tdr_act_cfg_policy, info->extack);
        if (ret < 0)
@@ -242,17 +250,12 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST])
                cfg->first = nla_get_u32(
                        tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]);
-       else
-               cfg->first = 100;
+
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST])
                cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]);
-       else
-               cfg->last = MAX_CABLE_LENGTH_CM;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP])
                cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]);
-       else
-               cfg->step = 100;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) {
                cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]);
@@ -263,8 +266,6 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
                                "invalid pair parameter");
                        return -EINVAL;
                }
-       } else {
-               cfg->pair = PHY_PAIR_ALL;
        }
 
        if (cfg->first > MAX_CABLE_LENGTH_CM) {
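
The reshuffle above fills in all four TDR defaults first and returns early when no configuration nest was supplied, so each attribute needs only a single override branch instead of an if/else pair. A compact model of that defaults-then-override parse shape (the structures and attribute table below are made up; only the shape mirrors the hunk):

    #include <stdio.h>

    struct tdr_cfg { unsigned int first, last, step, pair; };

    /* Fake "attribute table": a negative value means the attribute is absent. */
    struct attrs { int first, last, step, pair; };

    static void parse_tdr_cfg(const struct attrs *tb, struct tdr_cfg *cfg)
    {
            /* Defaults first, as in the hunk above. */
            cfg->first = 100;
            cfg->step  = 100;
            cfg->last  = 10000;   /* stand-in for MAX_CABLE_LENGTH_CM */
            cfg->pair  = ~0u;     /* stand-in for PHY_PAIR_ALL */

            if (!tb)              /* no nest supplied: keep the defaults */
                    return;

            /* Then one override branch per attribute that is present. */
            if (tb->first >= 0) cfg->first = tb->first;
            if (tb->last  >= 0) cfg->last  = tb->last;
            if (tb->step  >= 0) cfg->step  = tb->step;
            if (tb->pair  >= 0) cfg->pair  = tb->pair;
    }

    int main(void)
    {
            struct attrs nest = { .first = -1, .last = 500, .step = -1, .pair = -1 };
            struct tdr_cfg cfg;

            parse_tdr_cfg(&nest, &cfg);
            printf("first=%u last=%u step=%u\n", cfg.first, cfg.last, cfg.step);
            return 0;
    }
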
index 423e640..aaecfc9 100644 (file)
@@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
        [NETIF_F_GSO_PARTIAL_BIT] =      "tx-gso-partial",
+       [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
        [NETIF_F_GSO_SCTP_BIT] =         "tx-sctp-segmentation",
        [NETIF_F_GSO_ESP_BIT] =          "tx-esp-segmentation",
        [NETIF_F_GSO_UDP_L4_BIT] =       "tx-udp-segmentation",
+       [NETIF_F_GSO_FRAGLIST_BIT] =     "tx-gso-list",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CRC_BIT] =        "tx-checksum-sctp",
index b5df90c..21d5fc0 100644 (file)
@@ -2978,7 +2978,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
                               sizeof(match->mask.ipv6.dst));
                }
                if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
-                   memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+                   memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
                        match->dissector.used_keys |=
                                BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                        match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
index 7f47ba8..afe5ac8 100644 (file)
@@ -78,19 +78,18 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
 
        ret = linkstate_get_sqi(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi = ret;
 
        ret = linkstate_get_sqi_max(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi_max = ret;
 
+       ret = 0;
+out:
        ethnl_ops_complete(dev);
-
-       return 0;
+       return ret;
 }
 
 static int linkstate_reply_size(const struct ethnl_req_info *req_base,
index 88fd07f..dd8a1c1 100644 (file)
@@ -376,10 +376,17 @@ err_dev:
 }
 
 static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
-                                 const struct ethnl_dump_ctx *ctx)
+                                 const struct ethnl_dump_ctx *ctx,
+                                 struct netlink_callback *cb)
 {
+       void *ehdr;
        int ret;
 
+       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
+       if (!ehdr)
+               return -EMSGSIZE;
+
        ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
        rtnl_lock();
        ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
@@ -395,6 +402,10 @@ out:
        if (ctx->ops->cleanup_data)
                ctx->ops->cleanup_data(ctx->reply_data);
        ctx->reply_data->dev = NULL;
+       if (ret < 0)
+               genlmsg_cancel(skb, ehdr);
+       else
+               genlmsg_end(skb, ehdr);
        return ret;
 }
 
@@ -411,7 +422,6 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
        int s_idx = ctx->pos_idx;
        int h, idx = 0;
        int ret = 0;
-       void *ehdr;
 
        rtnl_lock();
        for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
@@ -431,26 +441,15 @@ restart_chain:
                        dev_hold(dev);
                        rtnl_unlock();
 
-                       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
-                                          cb->nlh->nlmsg_seq,
-                                          &ethtool_genl_family, 0,
-                                          ctx->ops->reply_cmd);
-                       if (!ehdr) {
-                               dev_put(dev);
-                               ret = -EMSGSIZE;
-                               goto out;
-                       }
-                       ret = ethnl_default_dump_one(skb, dev, ctx);
+                       ret = ethnl_default_dump_one(skb, dev, ctx, cb);
                        dev_put(dev);
                        if (ret < 0) {
-                               genlmsg_cancel(skb, ehdr);
                                if (ret == -EOPNOTSUPP)
                                        goto lock_and_cont;
                                if (likely(skb->len))
                                        ret = skb->len;
                                goto out;
                        }
-                       genlmsg_end(skb, ehdr);
 lock_and_cont:
                        rtnl_lock();
                        if (net->dev_base_seq != seq) {
index cd99f54..a6f4e9f 100644 (file)
@@ -339,7 +339,7 @@ static void hsr_announce(struct timer_list *t)
        rcu_read_unlock();
 }
 
-static void hsr_del_ports(struct hsr_priv *hsr)
+void hsr_del_ports(struct hsr_priv *hsr)
 {
        struct hsr_port *port;
 
@@ -356,31 +356,12 @@ static void hsr_del_ports(struct hsr_priv *hsr)
                hsr_del_port(port);
 }
 
-/* This has to be called after all the readers are gone.
- * Otherwise we would have to check the return value of
- * hsr_port_get_hsr().
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
-{
-       struct hsr_priv *hsr = netdev_priv(hsr_dev);
-
-       hsr_debugfs_term(hsr);
-       hsr_del_ports(hsr);
-
-       del_timer_sync(&hsr->prune_timer);
-       del_timer_sync(&hsr->announce_timer);
-
-       hsr_del_self_node(hsr);
-       hsr_del_nodes(&hsr->node_db);
-}
-
 static const struct net_device_ops hsr_device_ops = {
        .ndo_change_mtu = hsr_dev_change_mtu,
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
        .ndo_fix_features = hsr_fix_features,
-       .ndo_uninit = hsr_dev_destroy,
 };
 
 static struct device_type hsr_type = {
@@ -434,6 +415,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
                     struct netlink_ext_ack *extack)
 {
+       bool unregister = false;
        struct hsr_priv *hsr;
        int res;
 
@@ -485,25 +467,27 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        if (res)
                goto err_unregister;
 
+       unregister = true;
+
        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
 
        return 0;
 
-err_add_slaves:
-       unregister_netdevice(hsr_dev);
 err_unregister:
        hsr_del_ports(hsr);
 err_add_master:
        hsr_del_self_node(hsr);
 
+       if (unregister)
+               unregister_netdevice(hsr_dev);
        return res;
 }
index a099d7d..b8f9262 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/netdevice.h>
 #include "hsr_main.h"
 
+void hsr_del_ports(struct hsr_priv *hsr);
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
@@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
-
 #endif /* __HSR_DEVICE_H */
index e2564de..144da15 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <net/rtnetlink.h>
 #include <linux/rculist.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
@@ -100,8 +101,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
                        hsr_del_port(port);
                        if (hsr_slave_empty(master->hsr)) {
-                               unregister_netdevice_queue(master->dev,
-                                                          &list_kill);
+                               const struct rtnl_link_ops *ops;
+
+                               ops = master->dev->rtnl_link_ops;
+                               ops->dellink(master->dev, &list_kill);
                                unregister_netdevice_many(&list_kill);
                        }
                }
@@ -144,9 +147,9 @@ static int __init hsr_init(void)
 
 static void __exit hsr_exit(void)
 {
-       unregister_netdevice_notifier(&hsr_nb);
        hsr_netlink_exit();
        hsr_debugfs_remove_root();
+       unregister_netdevice_notifier(&hsr_nb);
 }
 
 module_init(hsr_init);
index 1decb25..6e14b7d 100644 (file)
@@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
 }
 
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct hsr_priv *hsr = netdev_priv(dev);
+
+       del_timer_sync(&hsr->prune_timer);
+       del_timer_sync(&hsr->announce_timer);
+
+       hsr_debugfs_term(hsr);
+       hsr_del_ports(hsr);
+
+       hsr_del_self_node(hsr);
+       hsr_del_nodes(&hsr->node_db);
+
+       unregister_netdevice_queue(dev, head);
+}
+
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct hsr_priv *hsr = netdev_priv(dev);
@@ -118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
        .priv_size      = sizeof(struct hsr_priv),
        .setup          = hsr_dev_setup,
        .newlink        = hsr_newlink,
+       .dellink        = hsr_dellink,
        .fill_info      = hsr_fill_info,
 };
 
index 6ecbb0c..e64e59b 100644 (file)
@@ -340,29 +340,31 @@ config NET_FOU_IP_TUNNELS
 
 config INET_AH
        tristate "IP: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET_ESP
        tristate "IP: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
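
A rough illustration of the new help text, not part of this patch: with INET_ESP now selecting only XFRM_ESP, any algorithm outside the RFC 8221 MUST set, and any accelerated implementation, has to be switched on in the crypto API by hand. A hypothetical .config fragment:

    CONFIG_INET_ESP=y
    # extra algorithm beyond the MUST set, enabled directly in the crypto API
    CONFIG_CRYPTO_SHA512=y
    # accelerated AES implementation, if the CPU supports it
    CONFIG_CRYPTO_AES_NI_INTEL=y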
 
index d14133e..5bda5ae 100644 (file)
@@ -361,3 +361,4 @@ module_exit(esp4_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
index e53871e..1f75dc6 100644 (file)
@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
                if (fl4.flowi4_scope < RT_SCOPE_LINK)
                        fl4.flowi4_scope = RT_SCOPE_LINK;
 
-               if (table)
+               if (table && table != RT_TABLE_MAIN)
                        tbl = fib_get_table(net, table);
 
                if (tbl)
index dcc79ff..abd0834 100644 (file)
@@ -1304,3 +1304,4 @@ module_init(fou_init);
 module_exit(fou_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP");
index 956a806..e30515f 100644 (file)
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 
        ipcm_init(&ipc);
        inet->tos = ip_hdr(skb)->tos;
-       sk->sk_mark = mark;
+       ipc.sockc.mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
 
@@ -710,10 +710,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
        icmp_param.skb    = skb_in;
        icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
-       sk->sk_mark = mark;
        ipcm_init(&ipc);
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param.replyopts.opt;
+       ipc.sockc.mark = mark;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, &icmp_param);
index 090d309..1720667 100644 (file)
@@ -1702,7 +1702,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
-       sk->sk_mark = fl4.flowi4_mark;
+       ipc.sockc.mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
index f4f1d11..0c1f364 100644 (file)
@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   __be32 remote, __be32 local,
                                   __be32 key)
 {
-       unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;
+       struct net_device *ndev;
+       unsigned int hash;
 
        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];
@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
-               return netdev_priv(itn->fb_tunnel_dev);
+       ndev = READ_ONCE(itn->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -1259,9 +1261,9 @@ void ip_tunnel_uninit(struct net_device *dev)
        struct ip_tunnel_net *itn;
 
        itn = net_generic(net, tunnel->ip_tnl_net_id);
-       /* fb_tunnel_dev will be unregisted in net-exit call. */
-       if (itn->fb_tunnel_dev != dev)
-               ip_tunnel_del(itn, netdev_priv(dev));
+       ip_tunnel_del(itn, netdev_priv(dev));
+       if (itn->fb_tunnel_dev == dev)
+               WRITE_ONCE(itn->fb_tunnel_dev, NULL);
 
        dst_cache_reset(&tunnel->dst_cache);
 }
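
A minimal C sketch, with hypothetical names rather than the tree's code, of the annotation pattern this hunk switches to: the fallback pointer is cleared by the writer and read locklessly, so both sides mark the access and the reader only ever uses its local snapshot:

    #include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
    #include <linux/types.h>

    struct demo_dev { bool up; };
    struct demo_net { struct demo_dev *fallback; };

    /* lockless reader (e.g. RCU context): take one annotated snapshot */
    static struct demo_dev *demo_get_fallback(struct demo_net *dn)
    {
    	struct demo_dev *d = READ_ONCE(dn->fallback);

    	return (d && d->up) ? d : NULL;		/* only the snapshot is used */
    }

    /* writer, serialized against other writers (e.g. under RTNL) */
    static void demo_unpublish(struct demo_net *dn, struct demo_dev *d)
    {
    	if (dn->fallback == d)
    		WRITE_ONCE(dn->fallback, NULL);	/* annotated store, no tearing */
    }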
index 181b7a2..f8b419e 100644 (file)
@@ -844,3 +844,21 @@ void ip_tunnel_unneed_metadata(void)
        static_branch_dec(&ip_tunnel_metadata_cnt);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
+{
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
+           ip_hdr(skb)->version == 4)
+               return htons(ETH_P_IP);
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
+           ipv6_hdr(skb)->version == 6)
+               return htons(ETH_P_IPV6);
+       return 0;
+}
+EXPORT_SYMBOL(ip_tunnel_parse_protocol);
+
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
+EXPORT_SYMBOL(ip_tunnel_header_ops);
index 1d9c8cf..460ca10 100644 (file)
@@ -441,6 +441,7 @@ static const struct net_device_ops vti_netdev_ops = {
 static void vti_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &vti_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_TUNNEL;
        ip_tunnel_setup(dev, vti_net_id);
 }
index 40fea52..75d35e7 100644 (file)
@@ -361,6 +361,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 static void ipip_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipip_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
 
        dev->type               = ARPHRD_TUNNEL;
        dev->flags              = IFF_NOARP;
index c2670ea..5bf9fa0 100644 (file)
@@ -1797,11 +1797,22 @@ out_free:
        return ret;
 }
 
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                  const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ipt_unregister_table(net, table);
+}
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ipt_unregister_table_pre_exit(net, table, ops);
        __ipt_unregister_table(net, table);
 }
 
@@ -1958,6 +1969,8 @@ static void __exit ip_tables_fini(void)
 
 EXPORT_SYMBOL(ipt_register_table);
 EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
+EXPORT_SYMBOL(ipt_unregister_table_exit);
 EXPORT_SYMBOL(ipt_do_table);
 module_init(ip_tables_init);
 module_exit(ip_tables_fini);
index 748dc3c..f2984c7 100644 (file)
@@ -118,3 +118,4 @@ module_exit(synproxy_tg4_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
index 9d54b40..8f7bc1e 100644 (file)
@@ -72,16 +72,24 @@ static int __net_init iptable_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit iptable_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_filter)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter,
+                                             filter_ops);
+}
+
 static void __net_exit iptable_filter_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_filter)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_filter);
        net->ipv4.iptable_filter = NULL;
 }
 
 static struct pernet_operations iptable_filter_net_ops = {
        .init = iptable_filter_net_init,
+       .pre_exit = iptable_filter_net_pre_exit,
        .exit = iptable_filter_net_exit,
 };
 
index bb9266e..f703a71 100644 (file)
@@ -100,15 +100,23 @@ static int __net_init iptable_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_mangle)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle,
+                                             mangle_ops);
+}
+
 static void __net_exit iptable_mangle_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_mangle)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_mangle);
        net->ipv4.iptable_mangle = NULL;
 }
 
 static struct pernet_operations iptable_mangle_net_ops = {
+       .pre_exit = iptable_mangle_net_pre_exit,
        .exit = iptable_mangle_net_exit,
 };
 
index ad33687..b0143b1 100644 (file)
@@ -113,16 +113,22 @@ static int __net_init iptable_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.nat_table)
+               ipt_nat_unregister_lookups(net);
+}
+
 static void __net_exit iptable_nat_net_exit(struct net *net)
 {
        if (!net->ipv4.nat_table)
                return;
-       ipt_nat_unregister_lookups(net);
-       ipt_unregister_table(net, net->ipv4.nat_table, NULL);
+       ipt_unregister_table_exit(net, net->ipv4.nat_table);
        net->ipv4.nat_table = NULL;
 }
 
 static struct pernet_operations iptable_nat_net_ops = {
+       .pre_exit = iptable_nat_net_pre_exit,
        .exit   = iptable_nat_net_exit,
 };
 
index 69697eb..9abfe6b 100644 (file)
@@ -67,15 +67,23 @@ static int __net_init iptable_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_raw)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw,
+                                             rawtable_ops);
+}
+
 static void __net_exit iptable_raw_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_raw)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_raw);
        net->ipv4.iptable_raw = NULL;
 }
 
 static struct pernet_operations iptable_raw_net_ops = {
+       .pre_exit = iptable_raw_net_pre_exit,
        .exit = iptable_raw_net_exit,
 };
 
index ac633c1..415c197 100644 (file)
@@ -62,16 +62,23 @@ static int __net_init iptable_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_security)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security,
+                                             sectbl_ops);
+}
+
 static void __net_exit iptable_security_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_security)
                return;
-
-       ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_security);
        net->ipv4.iptable_security = NULL;
 }
 
 static struct pernet_operations iptable_security_net_ops = {
+       .pre_exit = iptable_security_net_pre_exit,
        .exit = iptable_security_net_exit,
 };
 
index e32e41b..aba65fe 100644 (file)
@@ -34,3 +34,4 @@ module_exit(nf_flow_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_DESCRIPTION("Netfilter flow table support");
index abf89b9..bcdb37f 100644 (file)
@@ -107,3 +107,4 @@ module_exit(nft_dup_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
index ce29411..03df986 100644 (file)
@@ -210,3 +210,4 @@ module_exit(nft_fib4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
+MODULE_DESCRIPTION("nftables fib / ip route lookup support");
index 7e6fd5c..e408f81 100644 (file)
@@ -71,3 +71,4 @@ module_exit(nft_reject_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
+MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
index 5354272..df6fbef 100644 (file)
@@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                           inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
                           sk->sk_uid);
 
+       fl4.fl4_icmp_type = user_icmph.type;
+       fl4.fl4_icmp_code = user_icmph.code;
+
        security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
index 1d7076b..a01efa0 100644 (file)
@@ -2027,7 +2027,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                      const struct sk_buff *hint)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rt = (struct rtable *)hint;
+       struct rtable *rt = skb_rtable(hint);
        struct net *net = dev_net(dev);
        int err = -EINVAL;
        u32 tag = 0;
index 810cc16..6f0caf9 100644 (file)
@@ -2691,6 +2691,9 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->window_clamp = 0;
        tp->delivered = 0;
        tp->delivered_ce = 0;
+       if (icsk->icsk_ca_ops->release)
+               icsk->icsk_ca_ops->release(sk);
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
        tcp_set_ca_state(sk, TCP_CA_Open);
        tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
@@ -3246,10 +3249,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 #ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
        case TCP_MD5SIG_EXT:
-               if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
-                       err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
-               else
-                       err = -EINVAL;
+               err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
                break;
 #endif
        case TCP_USER_TIMEOUT:
@@ -4033,11 +4033,14 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
 
 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
+       u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
        struct scatterlist sg;
 
-       sg_init_one(&sg, key->key, key->keylen);
-       ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
-       return crypto_ahash_update(hp->md5_req);
+       sg_init_one(&sg, key->key, keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+       /* We use data_race() because tcp_md5_do_add() might change key->key under us */
+       return data_race(crypto_ahash_update(hp->md5_req));
 }
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
index 3172e31..62878cf 100644 (file)
@@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        icsk->icsk_ca_setsockopt = 1;
        memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       if (sk->sk_state != TCP_CLOSE)
+       if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                tcp_init_congestion_control(sk);
 }
 
index 8f8eefd..c7bf5b2 100644 (file)
@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
index 12fda8f..9615e72 100644 (file)
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
                 * cwnd may be very low (even just 1 packet), so we should ACK
                 * immediately.
                 */
-               inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+               if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+                       inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
        }
 }
 
@@ -3665,6 +3666,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                tcp_in_ack_event(sk, ack_ev_flags);
        }
 
+       /* This is a deviation from RFC3168 since it states that:
+        * "When the TCP data sender is ready to set the CWR bit after reducing
+        * the congestion window, it SHOULD set the CWR bit only on the first
+        * new data packet that it transmits."
+        * We accept CWR on pure ACKs to be more robust
+        * with widely-deployed TCP implementations that do this.
+        */
+       tcp_ecn_accept_cwr(sk, skb);
+
        /* We passed data and got it acked, remove any soft error
         * log. Something worked...
         */
@@ -4572,6 +4582,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+               sk->sk_data_ready(sk);
                tcp_drop(sk, skb);
                return;
        }
@@ -4800,8 +4811,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
-       tcp_ecn_accept_cwr(sk, skb);
-
        tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
@@ -4820,6 +4829,7 @@ queue_and_out:
                        sk_forced_mem_schedule(sk, skb->truesize);
                else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+                       sk->sk_data_ready(sk);
                        goto drop;
                }
 
index ad6435b..04bfcbb 100644 (file)
@@ -1111,9 +1111,21 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 
        key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
        if (key) {
-               /* Pre-existing entry - just update that one. */
-               memcpy(key->key, newkey, newkeylen);
-               key->keylen = newkeylen;
+               /* Pre-existing entry - just update that one.
+                * Note that the key might be used concurrently.
+                * data_race() tells KCSAN that we do not care about
+                * key mismatches, since changing the MD5 key on live flows
+                * can lead to packet drops.
+                */
+               data_race(memcpy(key->key, newkey, newkeylen));
+
+               /* Pairs with READ_ONCE() in tcp_md5_hash_key().
+                * Also note that a reader could catch the new key->keylen
+                * value but the old key->key[]; this is why we use __GFP_ZERO
+                * at sock_kmalloc() time below these lines.
+                */
+               WRITE_ONCE(key->keylen, newkeylen);
+
                return 0;
        }
 
@@ -1129,7 +1141,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }
 
-       key = sock_kmalloc(sk, sizeof(*key), gfp);
+       key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
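
A minimal sketch, again with hypothetical names, of how the reader in tcp_md5_hash_key() and the writer above pair up: the length is published with WRITE_ONCE() and snapshotted with READ_ONCE(), while the unsynchronized accesses to the key bytes are wrapped in data_race() because a transient mismatch is tolerated (in the real code the key is allocated with __GFP_ZERO, so a longer snapshot only ever reads zero padding):

    #include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE(), data_race() */
    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_key {
    	u8 bytes[80];
    	u8 len;
    };

    /* writer: may run while a reader is still hashing the old key */
    static void demo_key_update(struct demo_key *k, const u8 *newkey, u8 newlen)
    {
    	data_race(memcpy(k->bytes, newkey, newlen));	/* racy bytes are tolerated */
    	WRITE_ONCE(k->len, newlen);			/* pairs with READ_ONCE() below */
    }

    /* lockless reader: works on a local snapshot of the length */
    static void demo_key_use(const struct demo_key *k, u8 *out)
    {
    	u8 len = READ_ONCE(k->len);

    	data_race(memcpy(out, k->bytes, len));		/* bytes may be mid-update */
    }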
index a50e199..5f5b2f0 100644 (file)
@@ -700,7 +700,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                                       unsigned int mss, struct sk_buff *skb,
                                       struct tcp_out_options *opts,
                                       const struct tcp_md5sig_key *md5,
-                                      struct tcp_fastopen_cookie *foc)
+                                      struct tcp_fastopen_cookie *foc,
+                                      enum tcp_synack_type synack_type)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -715,7 +716,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                 * rather than TS in order to fit in better with old,
                 * buggy kernels, but that was deemed to be unnecessary.
                 */
-               ireq->tstamp_ok &= !ireq->sack_ok;
+               if (synack_type != TCP_SYNACK_COOKIE)
+                       ireq->tstamp_ok &= !ireq->sack_ok;
        }
 #endif
 
@@ -3394,7 +3396,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #endif
        skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                            foc) + sizeof(*th);
+                                            foc, synack_type) + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
index 992cf45..f4f19e8 100644 (file)
@@ -49,29 +49,31 @@ config IPV6_OPTIMISTIC_DAD
 
 config INET6_AH
        tristate "IPv6: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET6_ESP
        tristate "IPv6: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
index 55addea..1ca516f 100644 (file)
@@ -395,3 +395,4 @@ module_exit(esp6_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
index 091f941..430518a 100644 (file)
@@ -224,3 +224,4 @@ module_init(fou6_init);
 module_exit(fou6_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP (IPv6)");
index fc50003..9df8737 100644 (file)
@@ -566,7 +566,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -583,6 +582,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                fl6.flowi6_oif = np->ucast_oif;
 
        ipcm6_init_sk(&ipc6, np);
+       ipc6.sockc.mark = mark;
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -751,7 +751,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        sk = icmpv6_xmit_lock(net);
        if (!sk)
                goto out_bh_enable;
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -779,6 +778,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        ipcm6_init_sk(&ipc6, np);
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
        ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+       ipc6.sockc.mark = mark;
 
        if (ip6_append_data(sk, icmpv6_getfrag, &msg,
                            skb->len + sizeof(struct icmp6hdr),
index 257d2b6..36c58aa 100644 (file)
@@ -120,3 +120,4 @@ module_init(ila_init);
 module_exit(ila_fini);
 MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)");
index 781ca8c..6532bde 100644 (file)
@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                        gre_proto == htons(ETH_P_ERSPAN2)) ?
                       ARPHRD_ETHER : ARPHRD_IP6GRE;
        int score, cand_score = 4;
+       struct net_device *ndev;
 
        for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
                if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       dev = ign->fb_tunnel_dev;
-       if (dev && dev->flags & IFF_UP)
-               return netdev_priv(dev);
+       ndev = READ_ONCE(ign->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
+       if (ign->fb_tunnel_dev == dev)
+               WRITE_ONCE(ign->fb_tunnel_dev, NULL);
        dst_cache_reset(&t->dst_cache);
        dev_put(dev);
 }
index 821d96c..a18c378 100644 (file)
@@ -1846,6 +1846,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ip6_tnl_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6_dev_free;
 
index 1147f64..0d96416 100644 (file)
@@ -905,6 +905,7 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &vti6_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = vti6_dev_free;
 
index e273934..e96a431 100644 (file)
@@ -1807,11 +1807,22 @@ out_free:
        return ret;
 }
 
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ip6t_unregister_table(net, table);
+}
+
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ip6t_unregister_table_pre_exit(net, table, ops);
        __ip6t_unregister_table(net, table);
 }
 
@@ -1969,6 +1980,8 @@ static void __exit ip6_tables_fini(void)
 
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
+EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
+EXPORT_SYMBOL(ip6t_unregister_table_exit);
 EXPORT_SYMBOL(ip6t_do_table);
 
 module_init(ip6_tables_init);
index fd1f52a..d51d0c3 100644 (file)
@@ -121,3 +121,4 @@ module_exit(synproxy_tg6_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
index 32667f5..88337b5 100644 (file)
@@ -73,16 +73,24 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_filter)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter,
+                                              filter_ops);
+}
+
 static void __net_exit ip6table_filter_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_filter)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter);
        net->ipv6.ip6table_filter = NULL;
 }
 
 static struct pernet_operations ip6table_filter_net_ops = {
        .init = ip6table_filter_net_init,
+       .pre_exit = ip6table_filter_net_pre_exit,
        .exit = ip6table_filter_net_exit,
 };
 
index 070afb9..1a27486 100644 (file)
@@ -93,16 +93,24 @@ static int __net_init ip6table_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_mangle)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle,
+                                              mangle_ops);
+}
+
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_mangle)
                return;
 
-       ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle);
        net->ipv6.ip6table_mangle = NULL;
 }
 
 static struct pernet_operations ip6table_mangle_net_ops = {
+       .pre_exit = ip6table_mangle_net_pre_exit,
        .exit = ip6table_mangle_net_exit,
 };
 
index 0f48759..0a23265 100644 (file)
@@ -114,16 +114,22 @@ static int __net_init ip6table_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_nat)
+               ip6t_nat_unregister_lookups(net);
+}
+
 static void __net_exit ip6table_nat_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_nat)
                return;
-       ip6t_nat_unregister_lookups(net);
-       ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat);
        net->ipv6.ip6table_nat = NULL;
 }
 
 static struct pernet_operations ip6table_nat_net_ops = {
+       .pre_exit = ip6table_nat_net_pre_exit,
        .exit   = ip6table_nat_net_exit,
 };
 
index a22100b..8f9e742 100644 (file)
@@ -66,15 +66,23 @@ static int __net_init ip6table_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_raw)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw,
+                                              rawtable_ops);
+}
+
 static void __net_exit ip6table_raw_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_raw)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw);
        net->ipv6.ip6table_raw = NULL;
 }
 
 static struct pernet_operations ip6table_raw_net_ops = {
+       .pre_exit = ip6table_raw_net_pre_exit,
        .exit = ip6table_raw_net_exit,
 };
 
index a74335f..5e8c48f 100644 (file)
@@ -61,15 +61,23 @@ static int __net_init ip6table_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_security)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security,
+                                              sectbl_ops);
+}
+
 static void __net_exit ip6table_security_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_security)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_security);
        net->ipv6.ip6table_security = NULL;
 }
 
 static struct pernet_operations ip6table_security_net_ops = {
+       .pre_exit = ip6table_security_net_pre_exit,
        .exit = ip6table_security_net_exit,
 };
 
index a8566ee..667b8af 100644 (file)
@@ -35,3 +35,4 @@ module_exit(nf_flow_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
+MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
index 2af3220..8b5193e 100644 (file)
@@ -105,3 +105,4 @@ module_exit(nft_dup_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
+MODULE_DESCRIPTION("IPv6 nftables packet duplication support");
index 7ece86a..e204163 100644 (file)
@@ -255,3 +255,4 @@ module_exit(nft_fib6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(10, "fib");
+MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support");
index 680a28c..c1098a1 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nft_reject_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
+MODULE_DESCRIPTION("IPv6 packet rejection for nftables");
index 82cbb46..f327981 100644 (file)
@@ -431,9 +431,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
        struct fib6_info *sibling, *next_sibling;
        struct fib6_info *match = res->f6i;
 
-       if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
+       if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
                goto out;
 
+       if (match->nh && have_oif_match && res->nh)
+               return;
+
        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
@@ -3402,7 +3405,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
        if ((flags & RTF_REJECT) ||
            (dev && (dev->flags & IFF_LOOPBACK) &&
             !(addr_type & IPV6_ADDR_LOOPBACK) &&
-            !(flags & RTF_LOCAL)))
+            !(flags & (RTF_ANYCAST | RTF_LOCAL))))
                return true;
 
        return false;
index 1fbb4df..5e2c34c 100644 (file)
@@ -1421,6 +1421,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
        dev->netdev_ops         = &ipip6_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = ipip6_dev_free;
 
index 6d7ef78..6434d17 100644 (file)
@@ -1028,6 +1028,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 
        /* Queue the packet to IP for output */
        skb->ignore_df = 1;
+       skb_dst_drop(skb);
 #if IS_ENABLED(CONFIG_IPV6)
        if (l2tp_sk_is_v6(tunnel->sock))
                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1099,10 +1100,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                goto out_unlock;
        }
 
-       /* Get routing info from the tunnel socket */
-       skb_dst_drop(skb);
-       skb_dst_set(skb, sk_dst_check(sk, 0));
-
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
        switch (tunnel->encap) {
index 54fb8d4..6e53e43 100644 (file)
@@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (addr->sllc_arphrd != ARPHRD_ETHER)
+               goto out;
        rc = -ENODEV;
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
@@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
-       if (unlikely(addr->sllc_family != AF_LLC))
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
                goto out;
        dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
@@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
                if (llc->dev) {
-                       if (!addr->sllc_arphrd)
-                               addr->sllc_arphrd = llc->dev->type;
                        if (is_zero_ether_addr(addr->sllc_mac))
                                memcpy(addr->sllc_mac, llc->dev->dev_addr,
                                       IFHWADDRLEN);
index aa51509..02cde0f 100644 (file)
@@ -1105,11 +1105,8 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                               ttl, lifetime, 0, ifmsh->preq_id++, sdata);
 
        spin_lock_bh(&mpath->state_lock);
-       if (mpath->flags & MESH_PATH_DELETED) {
-               spin_unlock_bh(&mpath->state_lock);
-               goto enddiscovery;
-       }
-       mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+       if (!(mpath->flags & MESH_PATH_DELETED))
+               mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
        spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
index a88ab6f..5c5af4b 100644 (file)
@@ -2396,6 +2396,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 
 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 {
+       struct ieee80211_hdr *hdr = (void *)rx->skb->data;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
@@ -2406,6 +2407,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
        if (status->flag & RX_FLAG_DECRYPTED)
                return 0;
 
+       /* check mesh EAPOL frames first */
+       if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+                    ieee80211_is_data(fc))) {
+               struct ieee80211s_hdr *mesh_hdr;
+               u16 hdr_len = ieee80211_hdrlen(fc);
+               u16 ethertype_offset;
+               __be16 ethertype;
+
+               if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+                       goto drop_check;
+
+               /* make sure fixed part of mesh header is there, also checks skb len */
+               if (!pskb_may_pull(rx->skb, hdr_len + 6))
+                       goto drop_check;
+
+               mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+               ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+                                  sizeof(rfc1042_header);
+
+               if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+                   ethertype == rx->sdata->control_port_protocol)
+                       return 0;
+       }
+
+drop_check:
        /* Drop unencrypted frames if key is set. */
        if (unlikely(!ieee80211_has_protected(fc) &&
                     !ieee80211_is_any_nullfunc(fc) &&
index 7b1baca..cbc40b3 100644 (file)
@@ -639,11 +639,23 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
                struct ieee80211_hdr *hdr = (void *)skb->data;
+               __be16 ethertype = 0;
+
+               if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3))
+                       skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN);
 
                rcu_read_lock();
                sdata = ieee80211_sdata_from_skb(local, skb);
                if (sdata) {
-                       if (ieee80211_is_any_nullfunc(hdr->frame_control))
+                       if (ethertype == sdata->control_port_protocol ||
+                           ethertype == cpu_to_be16(ETH_P_PREAUTH))
+                               cfg80211_control_port_tx_status(&sdata->wdev,
+                                                               cookie,
+                                                               skb->data,
+                                                               skb->len,
+                                                               acked,
+                                                               GFP_ATOMIC);
+                       else if (ieee80211_is_any_nullfunc(hdr->frame_control))
                                cfg80211_probe_status(sdata->dev, hdr->addr1,
                                                      cookie, acked,
                                                      info->status.ack_signal,
@@ -654,12 +666,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                                                        skb->data, skb->len,
                                                        acked, GFP_ATOMIC);
                        else
-                               cfg80211_control_port_tx_status(&sdata->wdev,
-                                                               cookie,
-                                                               skb->data,
-                                                               skb->len,
-                                                               acked,
-                                                               GFP_ATOMIC);
+                               pr_warn("Unknown status report in ack skb\n");
+
                }
                rcu_read_unlock();
 
index e9ce658..1a2941e 100644 (file)
@@ -3996,6 +3996,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);
 
+               if (skb->protocol == sdata->control_port_protocol)
+                       ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
                skb = ieee80211_build_hdr(sdata, skb, info_flags,
                                          sta, ctrl_flags, cookie);
                if (IS_ERR(skb)) {
@@ -4206,7 +4209,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
            (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
                ra = sdata->u.mgd.bssid;
 
-       if (!is_valid_ether_addr(ra))
+       if (is_zero_ether_addr(ra))
                goto out_free;
 
        multicast = is_multicast_ether_addr(ra);
@@ -5371,7 +5374,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
 
        if (proto == sdata->control_port_protocol)
-               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
+                             IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
 
        if (unencrypted)
                flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
index 490b925..8f940be 100644 (file)
@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
         */
        subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
        if (subflow->request_mptcp) {
-               pr_debug("local_key=%llu", subflow->local_key);
                opts->suboptions = OPTION_MPTCP_MPC_SYN;
-               opts->sndr_key = subflow->local_key;
                *size = TCPOLEN_MPTCP_MPC_SYN;
                return true;
        } else if (subflow->request_join) {
@@ -451,9 +449,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 }
 
 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
-                                struct mptcp_ext *ext)
+                                struct sk_buff *skb, struct mptcp_ext *ext)
 {
-       if (!ext->use_map) {
+       if (!ext->use_map || !skb->len) {
                /* RFC6824 requires a DSS mapping with specific values
                 * if DATA_FIN is set but no data payload is mapped
                 */
@@ -505,7 +503,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                        opts->ext_copy = *mpext;
 
                if (skb && tcp_fin && subflow->data_fin_tx_enable)
-                       mptcp_write_data_fin(subflow, &opts->ext_copy);
+                       mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
                ret = true;
        }
 
index db56535..c6eeaf3 100644 (file)
@@ -249,6 +249,7 @@ struct mptcp_subflow_request_sock {
        u64     thmac;
        u32     local_nonce;
        u32     remote_nonce;
+       struct mptcp_sock       *msk;
 };
 
 static inline struct mptcp_subflow_request_sock *
index bbdb74b..3838a0b 100644 (file)
@@ -69,6 +69,9 @@ static void subflow_req_destructor(struct request_sock *req)
 
        pr_debug("subflow_req=%p", subflow_req);
 
+       if (subflow_req->msk)
+               sock_put((struct sock *)subflow_req->msk);
+
        if (subflow_req->mp_capable)
                mptcp_token_destroy_request(subflow_req->token);
        tcp_request_sock_ops.destructor(req);
@@ -86,8 +89,8 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
 }
 
 /* validate received token and create truncated hmac and nonce for SYN-ACK */
-static bool subflow_token_join_request(struct request_sock *req,
-                                      const struct sk_buff *skb)
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+                                                    const struct sk_buff *skb)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        u8 hmac[SHA256_DIGEST_SIZE];
@@ -97,13 +100,13 @@ static bool subflow_token_join_request(struct request_sock *req,
        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
-               return false;
+               return NULL;
        }
 
        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
-               return false;
+               return NULL;
        }
        subflow_req->local_id = local_id;
 
@@ -114,9 +117,7 @@ static bool subflow_token_join_request(struct request_sock *req,
                              subflow_req->remote_nonce, hmac);
 
        subflow_req->thmac = get_unaligned_be64(hmac);
-
-       sock_put((struct sock *)msk);
-       return true;
+       return msk;
 }
 
 static void subflow_init_req(struct request_sock *req,
@@ -133,6 +134,7 @@ static void subflow_init_req(struct request_sock *req,
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
+       subflow_req->msk = NULL;
 
 #ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -166,12 +168,9 @@ static void subflow_init_req(struct request_sock *req,
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
-               pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
-                        subflow_req->remote_nonce);
-               if (!subflow_token_join_request(req, skb)) {
-                       subflow_req->mp_join = 0;
-                       // @@ need to trigger RST
-               }
+               subflow_req->msk = subflow_token_join_request(req, skb);
+               pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+                        subflow_req->remote_nonce, subflow_req->msk);
        }
 }
 
@@ -354,10 +353,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
-       bool ret;
 
        subflow_req = mptcp_subflow_rsk(req);
-       msk = mptcp_token_get_sock(subflow_req->token);
+       msk = subflow_req->msk;
        if (!msk)
                return false;
 
@@ -365,12 +363,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);
 
-       ret = true;
-       if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
-               ret = false;
-
-       sock_put((struct sock *)msk);
-       return ret;
+       return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
 }
 
 static void mptcp_sock_destruct(struct sock *sk)
@@ -438,22 +431,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
-       bool fallback_is_fatal = false;
+       bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
-       bool fallback = false;
        struct sock *child;
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
-       /* we need later a valid 'mp_capable' value even when options are not
-        * parsed
+       /* After child creation we must look for 'mp_capable' even when options
+        * are not parsed
         */
        mp_opt.mp_capable = 0;
-       if (tcp_rsk(req)->is_mptcp == 0)
+
+       /* hopefully temporary handling for MP_JOIN+syncookie */
+       subflow_req = mptcp_subflow_rsk(req);
+       fallback_is_fatal = subflow_req->mp_join;
+       fallback = !tcp_rsk(req)->is_mptcp;
+       if (fallback)
                goto create_child;
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
-       subflow_req = mptcp_subflow_rsk(req);
        if (subflow_req->mp_capable) {
                if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
                        /* here we can receive and accept an in-window,
@@ -474,12 +470,11 @@ create_msk:
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
-               fallback_is_fatal = true;
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_join ||
                    !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
-                       return NULL;
+                       fallback = true;
                }
        }
 
@@ -522,10 +517,12 @@ create_child:
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;
 
-                       owner = mptcp_token_get_sock(ctx->token);
+                       owner = subflow_req->msk;
                        if (!owner)
                                goto dispose_child;
 
+                       /* move the msk reference ownership to the subflow */
+                       subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto dispose_child;
index 486959f..a8ce04a 100644 (file)
@@ -326,7 +326,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 2310a31..2c625e0 100644 (file)
@@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index e56ced6..7138e08 100644 (file)
@@ -274,7 +274,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 340cb95..56621d6 100644 (file)
@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
        for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
                if (!add_extension(id, cadt_flags, tb))
                        continue;
+               if (align < ip_set_extensions[id].align)
+                       align = ip_set_extensions[id].align;
                len = ALIGN(len, ip_set_extensions[id].align);
                set->offset[id] = len;
                set->extensions |= ip_set_extensions[id].type;
index 1ee4375..521e970 100644 (file)
@@ -682,7 +682,7 @@ retry:
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                ret = -ENOMEM;
                goto out;
        }
@@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                kfree(h);
                return -ENOMEM;
        }
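
The kfree() to ip_set_free() replacements in the ipset hunks above all share one rationale: these buffers come from ip_set_alloc(), which may fall back to a vmalloc-style allocation for large requests, so only the paired free helper knows how to release them. A minimal userspace sketch of the same pairing idea, assuming hypothetical buf_alloc()/buf_free() helpers (not kernel APIs):

    /* gcc pair.c  (Linux, for mmap) */
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    #define LARGE_THRESHOLD (64 * 1024)

    struct hdr { size_t len; int mmapped; };

    static void *buf_alloc(size_t len)
    {
        struct hdr *h;
        size_t total = sizeof(*h) + len;

        if (total >= LARGE_THRESHOLD) {
            h = mmap(NULL, total, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (h == MAP_FAILED)
                return NULL;
            h->mmapped = 1;
        } else {
            h = malloc(total);
            if (!h)
                return NULL;
            h->mmapped = 0;
        }
        h->len = total;
        return h + 1;
    }

    static void buf_free(void *p)
    {
        struct hdr *h = p ? (struct hdr *)p - 1 : NULL;

        if (!h)
            return;
        if (h->mmapped)
            munmap(h, h->len);  /* a bare free() here would corrupt the heap */
        else
            free(h);
    }

    int main(void)
    {
        void *map = buf_alloc(128 * 1024);  /* takes the mmap path */

        if (!map)
            return 1;
        memset(map, 0, 128 * 1024);
        buf_free(map);                      /* always the matching helper */
        return 0;
    }
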
index 79cd9dd..f33d72c 100644 (file)
@@ -2158,6 +2158,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
                err = __nf_conntrack_update(net, skb, ct, ctinfo);
                if (err < 0)
                        return err;
+
+               ct = nf_ct_get(skb, &ctinfo);
        }
 
        return nf_confirm_cthelper(skb, ct, ctinfo);
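
The conntrack hunk above re-reads the skb's conntrack entry with nf_ct_get() after __nf_conntrack_update(), because that call may have replaced the entry the cached pointer referred to. A small userspace sketch of the stale-pointer pattern, with purely illustrative types:

    /* gcc refetch.c */
    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int generation; };
    struct packet { struct entry *ct; };

    static int update_entry(struct packet *pkt)
    {
        /* may allocate a replacement entry and re-attach it to the packet */
        struct entry *fresh = malloc(sizeof(*fresh));

        if (!fresh)
            return -1;
        fresh->generation = pkt->ct->generation + 1;
        free(pkt->ct);
        pkt->ct = fresh;
        return 0;
    }

    int main(void)
    {
        struct packet pkt;
        struct entry *ct;

        pkt.ct = malloc(sizeof(*pkt.ct));
        if (!pkt.ct)
            return 1;
        pkt.ct->generation = 1;

        ct = pkt.ct;                 /* cached pointer */
        if (update_entry(&pkt) < 0)
            return 1;
        ct = pkt.ct;                 /* re-read, as the hunk does with nf_ct_get() */
        printf("generation %d\n", ct->generation);
        free(pkt.ct);
        return 0;
    }
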
index f108a76..2b01a15 100644 (file)
@@ -73,3 +73,4 @@ EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter packet duplication support");
index afa8517..b1eb527 100644 (file)
@@ -594,3 +594,4 @@ module_exit(nf_flow_table_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter flow table module");
index 88bedf1..bc4126d 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nf_flow_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
+MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
index 62651e6..5fff1e0 100644 (file)
@@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
+       list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
 }
index b9cbe1e..ebcdc8e 100644 (file)
@@ -1237,3 +1237,4 @@ EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 185fc82..c7cf1cd 100644 (file)
@@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
        nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
                                    basechain, &extack);
        mutex_lock(&net->nft.commit_mutex);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        nft_flow_offload_unbind(&bo, basechain);
        mutex_unlock(&net->nft.commit_mutex);
index 99127e2..5f24edf 100644 (file)
@@ -33,6 +33,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
+MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 
 #define nfnl_dereference_protected(id) \
        rcu_dereference_protected(table[(id)].subsys, \
index f9adca6..aa1a066 100644 (file)
@@ -902,3 +902,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("match");
 MODULE_ALIAS_NFT_EXPR("target");
+MODULE_DESCRIPTION("x_tables over nftables support");
index 69d6173..7d0761f 100644 (file)
@@ -280,3 +280,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso");
 MODULE_ALIAS_NFT_EXPR("connlimit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
+MODULE_DESCRIPTION("nftables connlimit rule support");
index f6d4d0f..85ed461 100644 (file)
@@ -303,3 +303,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("counter");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
+MODULE_DESCRIPTION("nftables counter rule support");
index faea72c..77258af 100644 (file)
@@ -1345,3 +1345,4 @@ MODULE_ALIAS_NFT_EXPR("notrack");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
+MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
index c2e78c1..40788b3 100644 (file)
@@ -102,3 +102,4 @@ module_exit(nft_dup_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
+MODULE_DESCRIPTION("nftables netdev packet duplication support");
index 465432e..a88d44e 100644 (file)
@@ -76,3 +76,4 @@ module_exit(nft_fib_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
+MODULE_DESCRIPTION("nftables fib inet support");
index a2e726a..3f3478a 100644 (file)
@@ -85,3 +85,4 @@ module_exit(nft_fib_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo M. Bermudo Garay <pablombg@gmail.com>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
+MODULE_DESCRIPTION("nftables netdev fib lookups support");
index b70b489..3b9b97a 100644 (file)
@@ -286,3 +286,4 @@ module_exit(nft_flow_offload_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("flow_offload");
+MODULE_DESCRIPTION("nftables hardware flow offload module");
index b836d55..96371d8 100644 (file)
@@ -248,3 +248,4 @@ module_exit(nft_hash_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("hash");
+MODULE_DESCRIPTION("Netfilter nftables hash module");
index 35b67d7..0e2c315 100644 (file)
@@ -372,3 +372,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("limit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
+MODULE_DESCRIPTION("nftables limit expression support");
index fe4831f..5789945 100644 (file)
@@ -298,3 +298,4 @@ module_exit(nft_log_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("log");
+MODULE_DESCRIPTION("Netfilter nf_tables log module");
index bc9fd98..71390b7 100644 (file)
@@ -305,3 +305,4 @@ module_exit(nft_masq_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("masq");
+MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
index 23a7bfd..4bcf33b 100644 (file)
@@ -402,3 +402,4 @@ module_exit(nft_nat_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
 MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_DESCRIPTION("Network Address Translation support");
index 48edb9d..f1fc824 100644 (file)
@@ -217,3 +217,4 @@ module_exit(nft_ng_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("numgen");
+MODULE_DESCRIPTION("nftables number generator module");
index bfd18d2..5f9207a 100644 (file)
@@ -252,3 +252,4 @@ module_exit(nft_objref_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("objref");
+MODULE_DESCRIPTION("nftables stateful object reference module");
index b42247a..c261d57 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_osf_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("osf");
+MODULE_DESCRIPTION("nftables passive OS fingerprint support");
index 5ece0a6..23265d7 100644 (file)
@@ -216,3 +216,4 @@ module_exit(nft_queue_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
 MODULE_ALIAS_NFT_EXPR("queue");
+MODULE_DESCRIPTION("Netfilter nftables queue module");
index 4413690..0363f53 100644 (file)
@@ -254,3 +254,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("quota");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
+MODULE_DESCRIPTION("Netfilter nftables quota module");
index 5b77917..2056051 100644 (file)
@@ -292,3 +292,4 @@ module_exit(nft_redir_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("redir");
+MODULE_DESCRIPTION("Netfilter nftables redirect support");
index 00f865f..86eafbb 100644 (file)
@@ -119,3 +119,4 @@ EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
index f41f414..cf8f264 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_reject_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
+MODULE_DESCRIPTION("Netfilter nftables reject inet support");
index e2c1fc6..4fda8b3 100644 (file)
@@ -388,3 +388,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("synproxy");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 30be578..d3eb953 100644 (file)
@@ -719,3 +719,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("tunnel");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
+MODULE_DESCRIPTION("nftables tunnel expression support");
index a8e5f6c..b4f7bbc 100644 (file)
@@ -244,3 +244,4 @@ MODULE_ALIAS("ipt_SNAT");
 MODULE_ALIAS("ipt_DNAT");
 MODULE_ALIAS("ip6t_SNAT");
 MODULE_ALIAS("ip6t_DNAT");
+MODULE_DESCRIPTION("SNAT and DNAT targets support");
index 55ee680..9395ee8 100644 (file)
@@ -351,22 +351,11 @@ int genl_register_family(struct genl_family *family)
                start = end = GENL_ID_VFS_DQUOT;
        }
 
-       if (family->maxattr && !family->parallel_ops) {
-               family->attrbuf = kmalloc_array(family->maxattr + 1,
-                                               sizeof(struct nlattr *),
-                                               GFP_KERNEL);
-               if (family->attrbuf == NULL) {
-                       err = -ENOMEM;
-                       goto errout_locked;
-               }
-       } else
-               family->attrbuf = NULL;
-
        family->id = idr_alloc_cyclic(&genl_fam_idr, family,
                                      start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_free;
+               goto errout_locked;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,8 +374,6 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
-errout_free:
-       kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
        return err;
@@ -419,8 +406,6 @@ int genl_unregister_family(const struct genl_family *family)
                   atomic_read(&genl_sk_destructing_cnt) == 0);
        genl_unlock();
 
-       kfree(family->attrbuf);
-
        genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 
        return 0;
@@ -485,30 +470,23 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
        if (!family->maxattr)
                return NULL;
 
-       if (family->parallel_ops) {
-               attrbuf = kmalloc_array(family->maxattr + 1,
-                                       sizeof(struct nlattr *), GFP_KERNEL);
-               if (!attrbuf)
-                       return ERR_PTR(-ENOMEM);
-       } else {
-               attrbuf = family->attrbuf;
-       }
+       attrbuf = kmalloc_array(family->maxattr + 1,
+                               sizeof(struct nlattr *), GFP_KERNEL);
+       if (!attrbuf)
+               return ERR_PTR(-ENOMEM);
 
        err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                            family->policy, validate, extack);
        if (err) {
-               if (family->parallel_ops)
-                       kfree(attrbuf);
+               kfree(attrbuf);
                return ERR_PTR(err);
        }
        return attrbuf;
 }
 
-static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
-                                          struct nlattr **attrbuf)
+static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
 {
-       if (family->parallel_ops)
-               kfree(attrbuf);
+       kfree(attrbuf);
 }
 
 struct genl_start_context {
@@ -542,7 +520,7 @@ static int genl_start(struct netlink_callback *cb)
 no_attrs:
        info = genl_dumpit_info_alloc();
        if (!info) {
-               genl_family_rcv_msg_attrs_free(ctx->family, attrs);
+               genl_family_rcv_msg_attrs_free(attrs);
                return -ENOMEM;
        }
        info->family = ctx->family;
@@ -559,7 +537,7 @@ no_attrs:
        }
 
        if (rc) {
-               genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+               genl_family_rcv_msg_attrs_free(info->attrs);
                genl_dumpit_info_free(info);
                cb->data = NULL;
        }
@@ -588,7 +566,7 @@ static int genl_lock_done(struct netlink_callback *cb)
                rc = ops->done(cb);
                genl_unlock();
        }
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -601,7 +579,7 @@ static int genl_parallel_done(struct netlink_callback *cb)
 
        if (ops->done)
                rc = ops->done(cb);
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -694,7 +672,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
                family->post_doit(ops, skb, &info);
 
 out:
-       genl_family_rcv_msg_attrs_free(family, attrbuf);
+       genl_family_rcv_msg_attrs_free(attrbuf);
 
        return err;
 }
@@ -1166,60 +1144,11 @@ static struct genl_family genl_ctrl __ro_after_init = {
        .netnsok = true,
 };
 
-static int genl_bind(struct net *net, int group)
-{
-       struct genl_family *f;
-       int err = -ENOENT;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (!f->netnsok && net != &init_net)
-                               err = -ENOENT;
-                       else if (f->mcast_bind)
-                               err = f->mcast_bind(net, fam_grp);
-                       else
-                               err = 0;
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-
-       return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
-       struct genl_family *f;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (f->mcast_unbind)
-                               f->mcast_unbind(net, fam_grp);
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-}
-
 static int __net_init genl_pernet_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .flags          = NL_CFG_F_NONROOT_RECV,
-               .bind           = genl_bind,
-               .unbind         = genl_unbind,
        };
 
        /* we'll bump the group number right afterwards */
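
The genetlink hunks above drop the shared per-family attribute buffer (which only existed for families without parallel_ops) and instead allocate the attribute array per received message, so the free helper no longer needs the family at all. A minimal userspace sketch of that per-message ownership, using hypothetical names rather than the netlink API:

    /* gcc attrs.c */
    #include <stdlib.h>

    struct attr { int type; int len; };

    static struct attr **parse_attrs(int maxattr)
    {
        /* one array per message, owned by the caller */
        struct attr **attrbuf = calloc(maxattr + 1, sizeof(*attrbuf));

        if (!attrbuf)
            return NULL;
        /* ... fill attrbuf[1..maxattr] from the message here ... */
        return attrbuf;
    }

    static void free_attrs(struct attr **attrbuf)
    {
        free(attrbuf);      /* unconditional, no parallel_ops special case */
    }

    int main(void)
    {
        struct attr **attrs = parse_attrs(16);

        if (!attrs)
            return 1;
        free_attrs(attrs);
        return 0;
    }
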
index fc0efd8..2611657 100644 (file)
@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct nlattr *attr, bool last)
 {
+       struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
        const struct nlattr *actions, *cpl_arg;
+       int len, max_len, rem = nla_len(attr);
        const struct check_pkt_len_arg *arg;
-       int rem = nla_len(attr);
        bool clone_flow_key;
 
        /* The first netlink attribute in 'attr' is always
@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
        cpl_arg = nla_data(attr);
        arg = nla_data(cpl_arg);
 
-       if (skb->len <= arg->pkt_len) {
+       len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+       max_len = arg->pkt_len;
+
+       if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+           len <= max_len) {
                /* Second netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
                 */
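
The openvswitch hunk above changes which length check_pkt_len compares: a packet reassembled from fragments is judged by its stored MRU plus the link-layer header, and an aggregated (GSO) packet by its per-segment size, instead of the total buffer length. A userspace sketch of that selection, with illustrative field names only:

    /* gcc pktlen.c */
    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        unsigned int total_len;   /* whole aggregated buffer */
        unsigned int mac_len;     /* link-layer header length */
        unsigned int mru;         /* max fragment size from reassembly, 0 if none */
        unsigned int gso_segsz;   /* per-segment payload size, 0 if not aggregated */
    };

    static bool pkt_len_at_most(const struct pkt *p, unsigned int max_len)
    {
        unsigned int len = p->mru ? p->mru + p->mac_len : p->total_len;

        if (p->gso_segsz)       /* validate per segment, like the GSO branch */
            return p->gso_segsz + p->mac_len <= max_len;
        return len <= max_len;
    }

    int main(void)
    {
        struct pkt frag = { .total_len = 3000, .mac_len = 14, .mru = 1500 };
        struct pkt gso  = { .total_len = 64000, .mac_len = 14, .gso_segsz = 1448 };

        printf("fragmented: %s\n", pkt_len_at_most(&frag, 1518) ? "small" : "big");
        printf("gso:        %s\n", pkt_len_at_most(&gso, 1518) ? "small" : "big");
        return 0;
    }
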
index 2d8d613..24a8c3c 100644 (file)
@@ -166,6 +166,7 @@ static void __qrtr_node_release(struct kref *kref)
 {
        struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
        struct radix_tree_iter iter;
+       struct qrtr_tx_flow *flow;
        unsigned long flags;
        void __rcu **slot;
 
@@ -181,8 +182,9 @@ static void __qrtr_node_release(struct kref *kref)
 
        /* Free tx flow counters */
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+               flow = *slot;
                radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
-               kfree(*slot);
+               kfree(flow);
        }
        kfree(node);
 }
@@ -427,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        unsigned int ver;
        size_t hdrlen;
 
-       if (len & 3)
+       if (len == 0 || len & 3)
                return -EINVAL;
 
        skb = netdev_alloc_skb(NULL, len);
@@ -441,6 +443,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 
        switch (ver) {
        case QRTR_PROTO_VER_1:
+               if (len < sizeof(*v1))
+                       goto err;
                v1 = data;
                hdrlen = sizeof(*v1);
 
@@ -454,6 +458,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
+               if (len < sizeof(*v2))
+                       goto err;
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;
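
The qrtr hunks above add the missing bounds checks: the payload is rejected when empty, and each version-specific header is only dereferenced after verifying the buffer is at least that large. A small userspace sketch of the same validation order, with illustrative header layouts:

    /* gcc qlen.c */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct hdr_v1 { uint32_t version; uint32_t type; uint32_t size; };
    struct hdr_v2 { uint8_t version; uint8_t flags; uint16_t optlen; uint32_t size; };

    static int parse(const void *data, size_t len)
    {
        uint32_t ver;

        if (len == 0 || len & 3)                 /* reject empty/unaligned input */
            return -1;
        memcpy(&ver, data, sizeof(ver));

        switch (ver) {
        case 1:
            if (len < sizeof(struct hdr_v1))     /* size check before use */
                return -1;
            break;
        case 2:
            if (len < sizeof(struct hdr_v2))     /* size check before use */
                return -1;
            break;
        default:
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        char short_pkt[4] = { 2, 0, 0, 0 };

        printf("%d\n", parse(short_pkt, sizeof(short_pkt)));   /* rejected: -1 */
        return 0;
    }
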
 
index ed7f213..f2fcab1 100644 (file)
@@ -905,6 +905,17 @@ void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
 
+/* Check connectivity of all paths
+ */
+void rds_check_all_paths(struct rds_connection *conn)
+{
+       int i = 0;
+
+       do {
+               rds_conn_path_connect_if_down(&conn->c_path[i]);
+       } while (++i < conn->c_npaths);
+}
+
 void rds_conn_connect_if_down(struct rds_connection *conn)
 {
        WARN_ON(conn->c_trans->t_mp_capable);
index 6019b0c..106e862 100644 (file)
@@ -778,6 +778,7 @@ void rds_conn_drop(struct rds_connection *conn);
 void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
 void rds_conn_connect_if_down(struct rds_connection *conn);
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
+void rds_check_all_paths(struct rds_connection *conn);
 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens,
@@ -823,6 +824,12 @@ rds_conn_path_up(struct rds_conn_path *cp)
 }
 
 static inline int
+rds_conn_path_down(struct rds_conn_path *cp)
+{
+       return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
+}
+
+static inline int
 rds_conn_up(struct rds_connection *conn)
 {
        WARN_ON(conn->c_trans->t_mp_capable);
index 68e2bdb..9a529a0 100644 (file)
@@ -1340,7 +1340,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                goto out;
        }
 
-       rds_conn_path_connect_if_down(cpath);
+       if (rds_conn_path_down(cpath))
+               rds_check_all_paths(conn);
 
        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
index 46f709a..f8001ec 100644 (file)
 #include "rds.h"
 #include "loop.h"
 
+static char * const rds_trans_modules[] = {
+       [RDS_TRANS_IB] = "rds_rdma",
+       [RDS_TRANS_GAP] = NULL,
+       [RDS_TRANS_TCP] = "rds_tcp",
+};
+
 static struct rds_transport *transports[RDS_TRANS_COUNT];
 static DECLARE_RWSEM(rds_trans_sem);
 
@@ -110,18 +116,20 @@ struct rds_transport *rds_trans_get(int t_type)
 {
        struct rds_transport *ret = NULL;
        struct rds_transport *trans;
-       unsigned int i;
 
        down_read(&rds_trans_sem);
-       for (i = 0; i < RDS_TRANS_COUNT; i++) {
-               trans = transports[i];
-
-               if (trans && trans->t_type == t_type &&
-                   (!trans->t_owner || try_module_get(trans->t_owner))) {
-                       ret = trans;
-                       break;
-               }
+       trans = transports[t_type];
+       if (!trans) {
+               up_read(&rds_trans_sem);
+               if (rds_trans_modules[t_type])
+                       request_module(rds_trans_modules[t_type]);
+               down_read(&rds_trans_sem);
+               trans = transports[t_type];
        }
+       if (trans && trans->t_type == t_type &&
+           (!trans->t_owner || try_module_get(trans->t_owner)))
+               ret = trans;
+
        up_read(&rds_trans_sem);
 
        return ret;
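
The rds_trans_get() rework above looks the transport up directly by type and, if it is not registered yet, drops the semaphore, asks for the corresponding module to be loaded, and retries the lookup. A userspace sketch of that lazy-lookup pattern, with a plain mutex standing in for the rwsem and hypothetical names throughout:

    /* gcc lazy.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>

    #define NTYPES 3

    static const char *loadable[NTYPES] = { "mod_a", NULL, "mod_c" };
    static const char *registered[NTYPES];
    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

    static void request_load(int type)
    {
        /* stands in for request_module(): the "module" registers its transport */
        pthread_mutex_lock(&reg_lock);
        registered[type] = loadable[type];
        pthread_mutex_unlock(&reg_lock);
    }

    static const char *get_transport(int type)
    {
        const char *t;

        pthread_mutex_lock(&reg_lock);
        t = registered[type];
        if (!t) {
            pthread_mutex_unlock(&reg_lock);     /* drop the lock before loading */
            if (loadable[type])
                request_load(type);
            pthread_mutex_lock(&reg_lock);
            t = registered[type];                /* retry the lookup */
        }
        pthread_mutex_unlock(&reg_lock);
        return t;
    }

    int main(void)
    {
        const char *t = get_transport(2);

        printf("type 2 -> %s\n", t ? t : "none");
        return 0;
    }
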
index b7611cc..032ed76 100644 (file)
 #include <net/ip.h>
 #include "ar-internal.h"
 
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+                              unsigned long user_call_ID)
+{
+}
+
 /*
  * Preallocate a single service call, connection and peer and, if possible,
  * give them a user ID and attach the user's side of the ID to them.
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
+                       if (call->notify_rx)
+                               call->notify_rx = rxrpc_dummy_notify;
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
index aa1c8ee..6be2672 100644 (file)
@@ -253,7 +253,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                 * confuse things
                 */
                annotation &= ~RXRPC_TX_ANNO_MASK;
-               annotation |= RXRPC_TX_ANNO_RESENT;
+               annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
                call->rxtx_annotations[ix] = annotation;
 
                skb = call->rxtx_buffer[ix];
index 299ac98..7675793 100644 (file)
@@ -722,13 +722,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
               rwind, ntohl(ackinfo->jumbo_max));
 
+       if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+               rwind = RXRPC_RXTX_BUFF_SIZE - 1;
        if (call->tx_winsize != rwind) {
-               if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-                       rwind = RXRPC_RXTX_BUFF_SIZE - 1;
                if (rwind > call->tx_winsize)
                        wake = true;
-               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
-                                           ntohl(ackinfo->rwind), wake);
+               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
                call->tx_winsize = rwind;
        }
 
index 43a2430..f901421 100644 (file)
@@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&ca->tcf_tm);
        bstats_update(&ca->tcf_bstats, skb);
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                if (skb->len < sizeof(struct iphdr))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                if (skb->len < sizeof(struct ipv6hdr))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
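
This hunk, and the similar ones in the tc files below, replace direct reads of skb->protocol (or the old tc_skb_protocol()) with skb_protocol(), so classification still sees the encapsulated IPv4/IPv6 ethertype when packets carry VLAN tags in their payload. A simplified userspace illustration of the "skip VLAN headers to find the inner protocol" idea, not the kernel helper itself:

    /* gcc vlan.c */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_P_IP     0x0800
    #define ETH_P_8021Q  0x8100
    #define ETH_P_8021AD 0x88A8

    /* vlan header: 2 bytes TCI followed by 2 bytes encapsulated ethertype */
    static uint16_t payload_protocol(uint16_t proto, const uint8_t *data, size_t len)
    {
        while ((proto == ETH_P_8021Q || proto == ETH_P_8021AD) && len >= 4) {
            uint16_t inner;

            memcpy(&inner, data + 2, sizeof(inner));
            proto = ntohs(inner);
            data += 4;
            len -= 4;
        }
        return proto;
    }

    int main(void)
    {
        /* one 802.1Q tag (TCI 0x0001) carrying IPv4 */
        uint8_t vlan_hdr[4] = { 0x00, 0x01, 0x08, 0x00 };
        uint16_t proto = payload_protocol(ETH_P_8021Q, vlan_hdr, sizeof(vlan_hdr));

        printf("inner protocol 0x%04x (%s)\n", proto,
               proto == ETH_P_IP ? "IPv4" : "other");
        return 0;
    }
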
 
index cb8608f..c60674c 100644 (file)
@@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
                goto drop;
 
        update_flags = params->update_flags;
-       protocol = tc_skb_protocol(skb);
+       protocol = skb_protocol(skb, false);
 again:
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
index e9f3576..67504ae 100644 (file)
@@ -624,7 +624,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
 {
        u8 family = NFPROTO_UNSPEC;
 
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                family = NFPROTO_IPV4;
                break;
@@ -748,6 +748,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                          const struct nf_nat_range2 *range,
                          enum nf_nat_manip_type maniptype)
 {
+       __be16 proto = skb_protocol(skb, true);
        int hooknum, err = NF_ACCEPT;
 
        /* See HOOK2MANIP(). */
@@ -759,14 +760,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
-               if (skb->protocol == htons(ETH_P_IP) &&
+               if (proto == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto out;
-               } else if (IS_ENABLED(CONFIG_IPV6) &&
-                          skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
@@ -925,6 +925,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        force = p->ct_action & TCA_CT_ACT_FORCE;
        tmpl = p->tmpl;
 
+       tcf_lastuse_update(&c->tcf_tm);
+
        if (clear) {
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
@@ -1550,4 +1552,3 @@ MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
 MODULE_DESCRIPTION("Connection tracking action");
 MODULE_LICENSE("GPL v2");
-
index 1964962..b5042f3 100644 (file)
@@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
        action = READ_ONCE(ca->tcf_action);
 
        wlen = skb_network_offset(skb);
-       if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                wlen += sizeof(struct iphdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                wlen += sizeof(struct ipv6hdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
 
index 9c62859..323ae7f 100644 (file)
@@ -32,7 +32,7 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
        return KTIME_MAX;
 }
 
-static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 {
        struct tcf_gate_params *param = &gact->param;
        ktime_t now, base, cycle;
@@ -43,18 +43,13 @@ static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 
        if (ktime_after(base, now)) {
                *start = base;
-               return 0;
+               return;
        }
 
        cycle = param->tcfg_cycletime;
 
-       /* cycle time should not be zero */
-       if (!cycle)
-               return -EFAULT;
-
        n = div64_u64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
-       return 0;
 }
 
 static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
@@ -277,6 +272,27 @@ release_list:
        return err;
 }
 
+static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+                            enum tk_offsets tko, s32 clockid,
+                            bool do_init)
+{
+       if (!do_init) {
+               if (basetime == gact->param.tcfg_basetime &&
+                   tko == gact->tk_offset &&
+                   clockid == gact->param.tcfg_clockid)
+                       return;
+
+               spin_unlock_bh(&gact->tcf_lock);
+               hrtimer_cancel(&gact->hitimer);
+               spin_lock_bh(&gact->tcf_lock);
+       }
+       gact->param.tcfg_basetime = basetime;
+       gact->param.tcfg_clockid = clockid;
+       gact->tk_offset = tko;
+       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+       gact->hitimer.function = gate_timer_func;
+}
+
 static int tcf_gate_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
@@ -287,12 +303,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        enum tk_offsets tk_offset = TK_OFFS_TAI;
        struct nlattr *tb[TCA_GATE_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
+       u64 cycletime = 0, basetime = 0;
        struct tcf_gate_params *p;
        s32 clockid = CLOCK_TAI;
        struct tcf_gate *gact;
        struct tc_gate *parm;
        int ret = 0, err;
-       u64 basetime = 0;
        u32 gflags = 0;
        s32 prio = -1;
        ktime_t start;
@@ -308,6 +324,27 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_GATE_PARMS])
                return -EINVAL;
 
+       if (tb[TCA_GATE_CLOCKID]) {
+               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+               switch (clockid) {
+               case CLOCK_REALTIME:
+                       tk_offset = TK_OFFS_REAL;
+                       break;
+               case CLOCK_MONOTONIC:
+                       tk_offset = TK_OFFS_MAX;
+                       break;
+               case CLOCK_BOOTTIME:
+                       tk_offset = TK_OFFS_BOOT;
+                       break;
+               case CLOCK_TAI:
+                       tk_offset = TK_OFFS_TAI;
+                       break;
+               default:
+                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+                       return -EINVAL;
+               }
+       }
+
        parm = nla_data(tb[TCA_GATE_PARMS]);
        index = parm->index;
 
@@ -331,10 +368,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
-       if (ret == ACT_P_CREATED) {
-               to_gate(*a)->param.tcfg_clockid = -1;
-               INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
-       }
 
        if (tb[TCA_GATE_PRIORITY])
                prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
@@ -345,41 +378,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_GATE_FLAGS])
                gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
 
-       if (tb[TCA_GATE_CLOCKID]) {
-               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
-               switch (clockid) {
-               case CLOCK_REALTIME:
-                       tk_offset = TK_OFFS_REAL;
-                       break;
-               case CLOCK_MONOTONIC:
-                       tk_offset = TK_OFFS_MAX;
-                       break;
-               case CLOCK_BOOTTIME:
-                       tk_offset = TK_OFFS_BOOT;
-                       break;
-               case CLOCK_TAI:
-                       tk_offset = TK_OFFS_TAI;
-                       break;
-               default:
-                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
-                       goto release_idr;
-               }
-       }
+       gact = to_gate(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&gact->param.entries);
 
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
 
-       gact = to_gate(*a);
-
        spin_lock_bh(&gact->tcf_lock);
        p = &gact->param;
 
-       if (tb[TCA_GATE_CYCLE_TIME]) {
-               p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
-               if (!p->tcfg_cycletime_ext)
-                       goto chain_put;
-       }
+       if (tb[TCA_GATE_CYCLE_TIME])
+               cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
 
        if (tb[TCA_GATE_ENTRY_LIST]) {
                err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
@@ -387,35 +398,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                        goto chain_put;
        }
 
-       if (!p->tcfg_cycletime) {
+       if (!cycletime) {
                struct tcfg_gate_entry *entry;
                ktime_t cycle = 0;
 
                list_for_each_entry(entry, &p->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
-               p->tcfg_cycletime = cycle;
+               cycletime = cycle;
+               if (!cycletime) {
+                       err = -EINVAL;
+                       goto chain_put;
+               }
        }
+       p->tcfg_cycletime = cycletime;
 
        if (tb[TCA_GATE_CYCLE_TIME_EXT])
                p->tcfg_cycletime_ext =
                        nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
 
+       gate_setup_timer(gact, basetime, tk_offset, clockid,
+                        ret == ACT_P_CREATED);
        p->tcfg_priority = prio;
-       p->tcfg_basetime = basetime;
-       p->tcfg_clockid = clockid;
        p->tcfg_flags = gflags;
-
-       gact->tk_offset = tk_offset;
-       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
-       gact->hitimer.function = gate_timer_func;
-
-       err = gate_get_start_time(gact, &start);
-       if (err < 0) {
-               NL_SET_ERR_MSG(extack,
-                              "Internal error: failed get start time");
-               release_entry_list(&p->entries);
-               goto chain_put;
-       }
+       gate_get_start_time(gact, &start);
 
        gact->current_close_time = start;
        gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
@@ -443,6 +448,13 @@ chain_put:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
 release_idr:
+       /* action is not inserted in any list: it's safe to init hitimer
+        * without taking tcf_lock.
+        */
+       if (ret == ACT_P_CREATED)
+               gate_setup_timer(gact, gact->param.tcfg_basetime,
+                                gact->tk_offset, gact->param.tcfg_clockid,
+                                true);
        tcf_idr_release(*a, bind);
        return err;
 }
@@ -453,9 +465,7 @@ static void tcf_gate_cleanup(struct tc_action *a)
        struct tcf_gate_params *p;
 
        p = &gact->param;
-       if (p->tcfg_clockid != -1)
-               hrtimer_cancel(&gact->hitimer);
-
+       hrtimer_cancel(&gact->hitimer);
        release_entry_list(&p->entries);
 }
 
index be3f215..8118e26 100644 (file)
@@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
                break;
        case TCA_MPLS_ACT_PUSH:
-               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
                if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
                                  skb->dev && skb->dev->type == ARPHRD_ETHER))
                        goto drop;
index b125b2b..b2b3faa 100644 (file)
@@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
index a00a203..e62beec 100644 (file)
@@ -652,6 +652,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
        down_write(&block->cb_lock);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        up_write(&block->cb_lock);
        rtnl_lock();
@@ -671,25 +672,29 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct netlink_ext_ack *extack)
 {
        struct flow_block_offload bo = {};
-       int err;
 
        tcf_block_offload_init(&bo, dev, command, ei->binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               extack);
 
-       if (dev->netdev_ops->ndo_setup_tc)
+       if (dev->netdev_ops->ndo_setup_tc) {
+               int err;
+
                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
-       else
-               err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
-                                                 &bo, tc_block_indr_cleanup);
+               if (err < 0) {
+                       if (err != -EOPNOTSUPP)
+                               NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
+                       return err;
+               }
 
-       if (err < 0) {
-               if (err != -EOPNOTSUPP)
-                       NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
-               return err;
+               return tcf_block_setup(block, &bo);
        }
 
-       return tcf_block_setup(block, &bo);
+       flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
+                                   tc_block_indr_cleanup);
+       tcf_block_setup(block, &bo);
+
+       return -EOPNOTSUPP;
 }
 
 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
@@ -1533,7 +1538,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
 reclassify:
 #endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
-               __be16 protocol = tc_skb_protocol(skb);
+               __be16 protocol = skb_protocol(skb, false);
                int err;
 
                if (tp->protocol != protocol &&
index 80ae7b9..ab53a93 100644 (file)
@@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
        if (dst)
                return ntohl(dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb,
@@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb,
        if (flow->ports.ports)
                return ntohs(flow->ports.dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 static u32 flow_get_nfct_src(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
@@ -164,7 +164,7 @@ fallback:
 static u32 flow_get_nfct_dst(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
index b2da372..e30bd96 100644 (file)
@@ -313,7 +313,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                /* skb_flow_dissect() does not set n_proto in case an unknown
                 * protocol, so do it rather here.
                 */
-               skb_key.basic.n_proto = skb->protocol;
+               skb_key.basic.n_proto = skb_protocol(skb, false);
                skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
index df00566..c95cf86 100644 (file)
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
        };
        int ret, network_offset;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                state.pf = NFPROTO_IPV4;
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
index 18755d2..3650117 100644 (file)
@@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em,
        struct nf_hook_state state;
        int ret;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return 0;
index d99966a..4625496 100644 (file)
@@ -195,7 +195,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
        /* Let userspace take care of the byte ordering */
-       dst->value = tc_skb_protocol(skb);
+       dst->value = skb_protocol(skb, false);
 }
 
 META_COLLECTOR(int_pkttype)
index ee12ca9..1c281cc 100644 (file)
@@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+       p->link.vcc = NULL;
+       p->link.sock = NULL;
+       p->link.common.classid = sch->handle;
+       p->link.ref = 1;
 
        err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
                            extack);
        if (err)
                return err;
 
-       p->link.vcc = NULL;
-       p->link.sock = NULL;
-       p->link.common.classid = sch->handle;
-       p->link.ref = 1;
        tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
        return 0;
 }
index 60f8ae5..ebaeec1 100644 (file)
@@ -592,7 +592,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys,
        bool rev = !skb->_nfct, upd = false;
        __be32 ip;
 
-       if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+       if (skb_protocol(skb, true) != htons(ETH_P_IP))
                return false;
 
        if (!nf_ct_get_tuple_skb(&tuple, skb))
@@ -1551,32 +1551,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
        return idx + (tin << 16);
 }
 
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
 {
-       int wlen = skb_network_offset(skb);
+       const int offset = skb_network_offset(skb);
+       u16 *buf, buf_;
        u8 dscp;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
-               wlen += sizeof(struct iphdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* ToS is in the second byte of iphdr */
+               dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct iphdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_IPV6):
-               wlen += sizeof(struct ipv6hdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* Traffic class is in the first and second bytes of ipv6hdr */
+               dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct ipv6hdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_ARP):
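
The sch_cake hunk above reads the DSCP field through a small bounce copy (the skb_header_pointer() idiom) so the read-only fast path no longer has to pull and make the header writable; the expensive writable path is taken only when "wash" actually rewrites the field. A userspace sketch of that read-cheap/write-rarely split, purely illustrative:

    /* gcc dscp.c */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* copy 'len' header bytes into 'buf', the header-pointer idiom */
    static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
                                      size_t off, size_t len, void *buf)
    {
        if (off + len > pkt_len)
            return NULL;
        memcpy(buf, pkt + off, len);
        return buf;
    }

    static uint8_t get_dscp(const uint8_t *pkt, size_t pkt_len, int wash,
                            uint8_t *writable_pkt)
    {
        uint16_t buf;
        const uint8_t *hdr = header_pointer(pkt, pkt_len, 0, sizeof(buf), &buf);
        uint8_t dscp;

        if (!hdr)
            return 0;
        dscp = hdr[1] >> 2;     /* ToS is the second byte of an IPv4 header */

        if (wash && dscp && writable_pkt)
            writable_pkt[1] &= 0x03;    /* clear DSCP, keep the ECN bits */
        return dscp;
    }

    int main(void)
    {
        uint8_t iphdr[20] = { 0x45, 0xb8 /* DSCP 46, ECN 0 */ };

        printf("dscp=%u\n", get_dscp(iphdr, sizeof(iphdr), 0, NULL));
        printf("dscp=%u (washed)\n", get_dscp(iphdr, sizeof(iphdr), 1, iphdr));
        printf("tos after wash=0x%02x\n", iphdr[1]);
        return 0;
    }
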
@@ -1593,14 +1612,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
 {
        struct cake_sched_data *q = qdisc_priv(sch);
        u32 tin, mark;
+       bool wash;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
-        * using firewall marks or skb->priority.
+        * using firewall marks or skb->priority. Call DSCP parsing early if
+        * wash is enabled, otherwise defer to below to skip unneeded parsing.
         */
-       dscp = cake_handle_diffserv(skb,
-                                   q->rate_flags & CAKE_FLAG_WASH);
        mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+       wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+       if (wash)
+               dscp = cake_handle_diffserv(skb, wash);
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
@@ -1614,6 +1636,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
 
        else {
+               if (!wash)
+                       dscp = cake_handle_diffserv(skb, wash);
                tin = q->tin_index[dscp];
 
                if (unlikely(tin >= q->tin_cnt))
@@ -2691,7 +2715,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt) {
-               int err = cake_change(sch, opt, extack);
+               err = cake_change(sch, opt, extack);
 
                if (err)
                        return err;
@@ -3008,7 +3032,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                        PUT_STAT_S32(BLUE_TIMER_US,
                                     ktime_to_us(
                                             ktime_sub(now,
-                                                    flow->cvars.blue_timer)));
+                                                      flow->cvars.blue_timer)));
                }
                if (flow->cvars.dropping) {
                        PUT_STAT_S32(DROP_NEXT_US,
index 05605b3..2b88710 100644 (file)
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        if (p->set_tc_index) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen) ||
@@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
@@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 */
                if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
-                               __func__, ntohs(tc_skb_protocol(skb)));
+                               __func__, ntohs(skb_protocol(skb, true)));
                break;
        }
 
index 8f06a80..2fb76fc 100644 (file)
@@ -1075,3 +1075,4 @@ module_init(fq_module_init)
 module_exit(fq_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
index 436160b..459a784 100644 (file)
@@ -721,3 +721,4 @@ module_init(fq_codel_module_init)
 module_exit(fq_codel_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue CoDel discipline");
index be35f03..420ede8 100644 (file)
@@ -721,3 +721,4 @@ module_exit(hhf_module_exit)
 MODULE_AUTHOR("Terry Lam");
 MODULE_AUTHOR("Nandita Dukkipati");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
index 689ef6f..2f1f0a3 100644 (file)
@@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                char haddr[MAX_ADDR_LEN];
 
                neigh_ha_snapshot(haddr, n, dev);
-               err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+               err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
                                      haddr, NULL, skb->len);
 
                if (err < 0)
index 7231513..8d73546 100644 (file)
@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
                                     enum sctp_scope scope, gfp_t gfp)
 {
+       struct sock *sk = asoc->base.sk;
        int flags;
 
        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
-       flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       if (!inet_v6_ipv6only(sk))
+               flags |= SCTP_ADDR4_ALLOWED;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
        if (asoc->peer.ipv6_address)
index 53bc615..701c5a4 100644 (file)
@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                 * well as the remote peer.
                 */
                if ((((AF_INET == addr->sa.sa_family) &&
+                     (flags & SCTP_ADDR4_ALLOWED) &&
                      (flags & SCTP_ADDR4_PEERSUPP))) ||
                    (((AF_INET6 == addr->sa.sa_family) &&
                      (flags & SCTP_ADDR6_ALLOWED) &&
index 092d1af..cde29f3 100644 (file)
@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
                 * sock as well as the remote peer.
                 */
                if (addr->a.sa.sa_family == AF_INET &&
-                   !(copy_flags & SCTP_ADDR4_PEERSUPP))
+                   (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+                    !(copy_flags & SCTP_ADDR4_PEERSUPP)))
                        continue;
                if (addr->a.sa.sa_family == AF_INET6 &&
                    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
index d5627df..779f414 100644 (file)
@@ -27,6 +27,7 @@
 
 #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMC_CLC_RECV_BUF_LEN   100
 
 /* eye catcher "SMCR" EBCDIC for CLC messages */
 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
@@ -36,7 +37,7 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
 /* check if received message has a correct header length and contains valid
  * heading and trailing eyecatchers
  */
-static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
 {
        struct smc_clc_msg_proposal_prefix *pclc_prfx;
        struct smc_clc_msg_accept_confirm *clc;
@@ -49,12 +50,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
                return false;
        switch (clcm->type) {
        case SMC_CLC_PROPOSAL:
-               if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-                   clcm->path != SMC_TYPE_B)
-                       return false;
                pclc = (struct smc_clc_msg_proposal *)clcm;
                pclc_prfx = smc_clc_proposal_get_prefix(pclc);
-               if (ntohs(pclc->hdr.length) !=
+               if (ntohs(pclc->hdr.length) <
                        sizeof(*pclc) + ntohs(pclc->iparea_offset) +
                        sizeof(*pclc_prfx) +
                        pclc_prfx->ipv6_prefixes_cnt *
@@ -86,7 +84,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
        default:
                return false;
        }
-       if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+       if (check_trl &&
+           memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
            memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
                return false;
        return true;
@@ -276,7 +275,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        struct msghdr msg = {NULL, 0};
        int reason_code = 0;
        struct kvec vec = {buf, buflen};
-       int len, datlen;
+       int len, datlen, recvlen;
+       bool check_trl = true;
        int krflags;
 
        /* peek the first few bytes to determine length of data to receive
@@ -320,10 +320,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
        datlen = ntohs(clcm->length);
        if ((len < sizeof(struct smc_clc_msg_hdr)) ||
-           (datlen > buflen) ||
-           (clcm->version != SMC_CLC_V1) ||
-           (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-            clcm->path != SMC_TYPE_B) ||
+           (clcm->version < SMC_CLC_V1) ||
            ((clcm->type != SMC_CLC_DECLINE) &&
             (clcm->type != expected_type))) {
                smc->sk.sk_err = EPROTO;
@@ -331,16 +328,38 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                goto out;
        }
 
+       if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
+               reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
+
        /* receive the complete CLC message */
        memset(&msg, 0, sizeof(struct msghdr));
-       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
+       if (datlen > buflen) {
+               check_trl = false;
+               recvlen = buflen;
+       } else {
+               recvlen = datlen;
+       }
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
        krflags = MSG_WAITALL;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
-       if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
+       if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
                smc->sk.sk_err = EPROTO;
                reason_code = -EPROTO;
                goto out;
        }
+       datlen -= len;
+       while (datlen) {
+               u8 tmp[SMC_CLC_RECV_BUF_LEN];
+
+               vec.iov_base = &tmp;
+               vec.iov_len = SMC_CLC_RECV_BUF_LEN;
+               /* receive remaining proposal message */
+               recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+                                               SMC_CLC_RECV_BUF_LEN : datlen;
+               iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+               len = sock_recvmsg(smc->clcsock, &msg, krflags);
+               datlen -= len;
+       }
        if (clcm->type == SMC_CLC_DECLINE) {
                struct smc_clc_msg_decline *dclc;
 
index 4658767..76c2b15 100644 (file)
@@ -25,6 +25,7 @@
 #define SMC_CLC_V1             0x1             /* SMC version                */
 #define SMC_TYPE_R             0               /* SMC-R only                 */
 #define SMC_TYPE_D             1               /* SMC-D only                 */
+#define SMC_TYPE_N             2               /* neither SMC-R nor SMC-D    */
 #define SMC_TYPE_B             3               /* SMC-R and SMC-D            */
 #define CLC_WAIT_TIME          (6 * HZ)        /* max. wait time on clcsock  */
 #define CLC_WAIT_TIME_SHORT    HZ              /* short wait time on clcsock */
@@ -46,6 +47,7 @@
 #define SMC_CLC_DECL_ISMVLANERR        0x03090000  /* err to reg vlan id on ism dev  */
 #define SMC_CLC_DECL_NOACTLINK 0x030a0000  /* no active smc-r link in lgr    */
 #define SMC_CLC_DECL_NOSRVLINK 0x030b0000  /* SMC-R link from srv not found  */
+#define SMC_CLC_DECL_VERSMISMAT        0x030c0000  /* SMC version mismatch           */
 #define SMC_CLC_DECL_SYNCERR   0x04000000  /* synchronization error          */
 #define SMC_CLC_DECL_PEERDECL  0x05000000  /* peer declined during handshake */
 #define SMC_CLC_DECL_INTERR    0x09990000  /* internal error                 */
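
The reworked smc_clc_wait_msg() above now tolerates CLC messages longer than the caller's buffer: it receives at most buflen bytes, skips trailer validation on the truncated copy (check_trl = false), and drains the remainder of the announced length in SMC_CLC_RECV_BUF_LEN chunks so the TCP stream stays aligned on the next CLC message. A minimal userspace sketch of that drain pattern, assuming a connected stream socket; recv_clc_like() and RECV_BUF_LEN are illustrative names, not kernel symbols:

#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

#define RECV_BUF_LEN 64                 /* stands in for SMC_CLC_RECV_BUF_LEN */

/* Receive an announced datlen-byte message: copy what fits into buf,
 * then consume the rest in fixed-size chunks. */
static ssize_t recv_clc_like(int fd, void *buf, size_t buflen, size_t datlen)
{
        size_t recvlen = datlen > buflen ? buflen : datlen;
        ssize_t len = recv(fd, buf, recvlen, MSG_WAITALL);

        if (len < 0 || (size_t)len < recvlen)
                return -1;

        datlen -= (size_t)len;
        while (datlen) {                /* drain the part that did not fit */
                uint8_t tmp[RECV_BUF_LEN];
                size_t chunk = datlen > sizeof(tmp) ? sizeof(tmp) : datlen;

                len = recv(fd, tmp, chunk, MSG_WAITALL);
                if (len <= 0)
                        return -1;
                datlen -= (size_t)len;
        }
        return (ssize_t)recvlen;
}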
index 7964a21..f69d205 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/wait.h>
 #include <linux/reboot.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
@@ -247,7 +248,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
                if (smc_link_usable(lnk))
                        lnk->state = SMC_LNK_INACTIVE;
        }
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
 }
 
 static void smc_lgr_free(struct smc_link_group *lgr);
@@ -1130,18 +1132,19 @@ static void smcr_link_up(struct smc_link_group *lgr,
                        return;
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* some other llc task is ongoing */
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                }
-               if (list_empty(&lgr->list) ||
-                   !smc_ib_port_active(smcibdev, ibport))
-                       return; /* lgr or device no longer active */
-               link = smc_llc_usable_link(lgr);
-               if (!link)
-                       return;
-               smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
-                                     NULL, SMC_LLC_REQ);
+               /* lgr or device no longer active? */
+               if (!list_empty(&lgr->list) &&
+                   smc_ib_port_active(smcibdev, ibport))
+                       link = smc_llc_usable_link(lgr);
+               if (link)
+                       smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
+                                             gid, NULL, SMC_LLC_REQ);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1195,13 +1198,17 @@ static void smcr_link_down(struct smc_link *lnk)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* another llc task is ongoing */
                        mutex_unlock(&lgr->llc_conf_mutex);
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                        mutex_lock(&lgr->llc_conf_mutex);
                }
-               smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
-                                        SMC_LLC_DEL_LOST_PATH);
+               if (!list_empty(&lgr->list))
+                       smc_llc_send_delete_link(to_lnk, del_link_id,
+                                                SMC_LLC_REQ, true,
+                                                SMC_LLC_DEL_LOST_PATH);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1262,7 +1269,7 @@ static void smc_link_down_work(struct work_struct *work)
 
        if (list_empty(&lgr->list))
                return;
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        mutex_lock(&lgr->llc_conf_mutex);
        smcr_link_down(link);
        mutex_unlock(&lgr->llc_conf_mutex);
@@ -1955,20 +1962,20 @@ static void smc_core_going_away(void)
        struct smc_ib_device *smcibdev;
        struct smcd_dev *smcd;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
                int i;
 
                for (i = 0; i < SMC_MAX_PORTS; i++)
                        set_bit(i, smcibdev->ports_going_away);
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
                smcd->going_away = 1;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* Clean up all SMC link groups */
@@ -1980,10 +1987,10 @@ static void smc_lgrs_shutdown(void)
 
        smc_smcr_terminate_all(NULL);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                smc_smcd_terminate_all(smcd);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 static int smc_core_reboot_event(struct notifier_block *this,
index 86d160f..c3ff512 100644 (file)
@@ -262,8 +262,10 @@ struct smc_link_group {
                        struct work_struct      llc_del_link_work;
                        struct work_struct      llc_event_work;
                                                /* llc event worker */
-                       wait_queue_head_t       llc_waiter;
+                       wait_queue_head_t       llc_flow_waiter;
                                                /* w4 next llc event */
+                       wait_queue_head_t       llc_msg_waiter;
+                                               /* w4 next llc msg */
                        struct smc_llc_flow     llc_flow_lcl;
                                                /* llc local control field */
                        struct smc_llc_flow     llc_flow_rmt;
index 562a52d..7637fde 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <linux/wait.h>
+#include <linux/mutex.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 
@@ -33,7 +34,7 @@
 #define SMC_QP_RNR_RETRY                       7 /* 7: infinite */
 
 struct smc_ib_devices smc_ib_devices = {       /* smc-registered ib devices */
-       .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+       .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
        .list = LIST_HEAD_INIT(smc_ib_devices.list),
 };
 
@@ -565,9 +566,9 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
        INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
        atomic_set(&smcibdev->lnk_cnt, 0);
        init_waitqueue_head(&smcibdev->lnks_deleted);
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_add_tail(&smcibdev->list, &smc_ib_devices.list);
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
        INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
                              smc_ib_global_event_handler);
@@ -602,9 +603,9 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
 {
        struct smc_ib_device *smcibdev = client_data;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        pr_warn_ratelimited("smc: removing ib device %s\n",
                            smcibdev->ibdev->name);
        smc_smcr_terminate_all(smcibdev);
index e6a696a..ae6776e 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
+#include <linux/mutex.h>
 #include <linux/wait.h>
 #include <rdma/ib_verbs.h>
 #include <net/smc.h>
@@ -25,7 +26,7 @@
 
 struct smc_ib_devices {                        /* list of smc ib devices definition */
        struct list_head        list;
-       spinlock_t              lock;   /* protects list of smc ib devices */
+       struct mutex            mutex;  /* protects list of smc ib devices */
 };
 
 extern struct smc_ib_devices   smc_ib_devices; /* list of smc ib devices */
index 91f85fc..998c525 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <asm/page.h>
 
@@ -17,7 +18,7 @@
 
 struct smcd_dev_list smcd_dev_list = {
        .list = LIST_HEAD_INIT(smcd_dev_list.list),
-       .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+       .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
 };
 
 /* Test if an ISM communication is possible. */
@@ -317,9 +318,9 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
 
 int smcd_register_dev(struct smcd_dev *smcd)
 {
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_add_tail(&smcd->list, &smcd_dev_list.list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 
        pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
                            dev_name(&smcd->dev), smcd->pnetid,
@@ -333,9 +334,9 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
 {
        pr_warn_ratelimited("smc: removing smcd device %s\n",
                            dev_name(&smcd->dev));
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_del_init(&smcd->list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        smcd->going_away = 1;
        smc_smcd_terminate_all(smcd);
        flush_workqueue(smcd->event_wq);
index 4da946c..81cc453 100644 (file)
 #define SMCD_ISM_H
 
 #include <linux/uio.h>
+#include <linux/mutex.h>
 
 #include "smc.h"
 
 struct smcd_dev_list { /* List of SMCD devices */
        struct list_head list;
-       spinlock_t lock;        /* Protects list of devices */
+       struct mutex mutex;     /* Protects list of devices */
 };
 
 extern struct smcd_dev_list    smcd_dev_list; /* list of smcd devices */
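
The smc_ib_devices and smcd_dev_list locks are converted from spinlocks to mutexes because the list walks now call functions that can sleep (for example smc_smcd_terminate_all(), which waits for link groups to finish). A short kernel-style sketch of the resulting pattern; smcd_for_each_dev() is a hypothetical helper written for illustration, not part of the patch:

#include <linux/list.h>
#include <linux/mutex.h>

#include "smc_ism.h"    /* struct smcd_dev_list, smcd_dev_list */

/* Walk the SMC-D device list under its mutex; unlike the old spinlock,
 * the mutex allows fn() to block while the list is held. */
static void smcd_for_each_dev(void (*fn)(struct smcd_dev *smcd))
{
        struct smcd_dev *smcd;

        mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                fn(smcd);               /* may sleep, e.g. wait for conns */
        mutex_unlock(&smcd_dev_list.mutex);
}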
index 391237b..c1a0386 100644 (file)
@@ -186,6 +186,26 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
        flow->qentry = qentry;
 }
 
+static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
+                                 struct smc_llc_qentry *qentry)
+{
+       u8 msg_type = qentry->msg.raw.hdr.common.type;
+
+       if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
+           flow_type != msg_type && !lgr->delayed_event) {
+               lgr->delayed_event = qentry;
+               return;
+       }
+       /* drop parallel or already-in-progress llc requests */
+       if (flow_type != msg_type)
+               pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+                            "LLC msg: msg %d flow %d role %d\n",
+                            SMC_LGR_ID_SIZE, &lgr->id,
+                            qentry->msg.raw.hdr.common.type,
+                            flow_type, lgr->role);
+       kfree(qentry);
+}
+
 /* try to start a new llc flow, initiated by an incoming llc msg */
 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
                               struct smc_llc_qentry *qentry)
@@ -195,14 +215,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        spin_lock_bh(&lgr->llc_flow_lock);
        if (flow->type) {
                /* a flow is already active */
-               if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
-                    qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
-                   !lgr->delayed_event) {
-                       lgr->delayed_event = qentry;
-               } else {
-                       /* forget this llc request */
-                       kfree(qentry);
-               }
+               smc_llc_flow_parallel(lgr, flow->type, qentry);
                spin_unlock_bh(&lgr->llc_flow_lock);
                return false;
        }
@@ -222,8 +235,8 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        }
        if (qentry == lgr->delayed_event)
                lgr->delayed_event = NULL;
-       spin_unlock_bh(&lgr->llc_flow_lock);
        smc_llc_flow_qentry_set(flow, qentry);
+       spin_unlock_bh(&lgr->llc_flow_lock);
        return true;
 }
 
@@ -251,11 +264,11 @@ again:
                return 0;
        }
        spin_unlock_bh(&lgr->llc_flow_lock);
-       rc = wait_event_interruptible_timeout(lgr->llc_waiter,
-                       (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
-                        (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
-                         lgr->llc_flow_rmt.type == allowed_remote)),
-                       SMC_LLC_WAIT_TIME);
+       rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
+                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
+                                (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
+                                 lgr->llc_flow_rmt.type == allowed_remote))),
+                               SMC_LLC_WAIT_TIME * 10);
        if (!rc)
                return -ETIMEDOUT;
        goto again;
@@ -272,7 +285,7 @@ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
            flow == &lgr->llc_flow_lcl)
                schedule_work(&lgr->llc_event_work);
        else
-               wake_up_interruptible(&lgr->llc_waiter);
+               wake_up(&lgr->llc_flow_waiter);
 }
 
 /* lnk is optional and used for early wakeup when link goes down, useful in
@@ -283,26 +296,32 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
                                    int time_out, u8 exp_msg)
 {
        struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
+       u8 rcv_msg;
 
-       wait_event_interruptible_timeout(lgr->llc_waiter,
-                                        (flow->qentry ||
-                                         (lnk && !smc_link_usable(lnk)) ||
-                                         list_empty(&lgr->list)),
-                                        time_out);
+       wait_event_timeout(lgr->llc_msg_waiter,
+                          (flow->qentry ||
+                           (lnk && !smc_link_usable(lnk)) ||
+                           list_empty(&lgr->list)),
+                          time_out);
        if (!flow->qentry ||
            (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
                smc_llc_flow_qentry_del(flow);
                goto out;
        }
-       if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
+       rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+       if (exp_msg && rcv_msg != exp_msg) {
                if (exp_msg == SMC_LLC_ADD_LINK &&
-                   flow->qentry->msg.raw.hdr.common.type ==
-                   SMC_LLC_DELETE_LINK) {
+                   rcv_msg == SMC_LLC_DELETE_LINK) {
                        /* flow_start will delay the unexpected msg */
                        smc_llc_flow_start(&lgr->llc_flow_lcl,
                                           smc_llc_flow_qentry_clr(flow));
                        return NULL;
                }
+               pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+                            "msg %d exp %d flow %d role %d flags %x\n",
+                            SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+                            flow->type, lgr->role,
+                            flow->qentry->msg.raw.hdr.flags);
                smc_llc_flow_qentry_del(flow);
        }
 out:
@@ -1222,8 +1241,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        smcr_link_clear(lnk_del, true);
 
@@ -1297,8 +1316,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
                goto out; /* asymmetric link already deleted */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        if (!list_empty(&lgr->list)) {
                /* qentry is either a request from peer (send it back to
@@ -1459,7 +1478,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* a flow is waiting for this message */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_add_link_work);
@@ -1474,7 +1493,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* a flow is waiting for this message */
                        smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
-                       wake_up_interruptible(&lgr->llc_waiter);
+                       wake_up(&lgr->llc_msg_waiter);
                        return;
                }
                break;
@@ -1485,7 +1504,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1496,7 +1515,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1581,7 +1600,7 @@ static void smc_llc_rx_response(struct smc_link *link,
        case SMC_LLC_DELETE_RKEY:
                /* assign responses to the local flow, we requested them */
                smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
-               wake_up_interruptible(&link->lgr->llc_waiter);
+               wake_up(&link->lgr->llc_msg_waiter);
                return;
        case SMC_LLC_CONFIRM_RKEY_CONT:
                /* not used because max links is 3 */
@@ -1616,7 +1635,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
        spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
        list_add_tail(&qentry->list, &lgr->llc_event_q);
        spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
-       schedule_work(&link->lgr->llc_event_work);
+       schedule_work(&lgr->llc_event_work);
 }
 
 /* copy received msg and add it to the event queue */
@@ -1677,7 +1696,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
        INIT_LIST_HEAD(&lgr->llc_event_q);
        spin_lock_init(&lgr->llc_event_q_lock);
        spin_lock_init(&lgr->llc_flow_lock);
-       init_waitqueue_head(&lgr->llc_waiter);
+       init_waitqueue_head(&lgr->llc_flow_waiter);
+       init_waitqueue_head(&lgr->llc_msg_waiter);
        mutex_init(&lgr->llc_conf_mutex);
        lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
 }
@@ -1686,7 +1706,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
 void smc_llc_lgr_clear(struct smc_link_group *lgr)
 {
        smc_llc_event_flush(lgr);
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        cancel_work_sync(&lgr->llc_event_work);
        cancel_work_sync(&lgr->llc_add_link_work);
        cancel_work_sync(&lgr->llc_del_link_work);
index 014d91b..30e5fac 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/ctype.h>
+#include <linux/mutex.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 
@@ -129,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                return rc;
 
        /* remove ib devices */
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
                        if (ibdev->pnetid_by_user[ibport] &&
@@ -149,9 +150,9 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        /* remove smcd devices */
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (smcd_dev->pnetid_by_user &&
                    (!pnet_name ||
@@ -165,7 +166,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        rc = 0;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return rc;
 }
 
@@ -240,14 +241,14 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
                memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
                       SMC_MAX_PNETID_LEN);
                ib_dev->pnetid_by_user[ib_port - 1] = true;
                applied = true;
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return applied;
 }
 
@@ -258,13 +259,13 @@ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name)
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
                memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
                smcd_dev->pnetid_by_user = true;
                applied = true;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return applied;
 }
 
@@ -300,7 +301,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
@@ -311,7 +312,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        }
        ibdev = NULL;
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return ibdev;
 }
 
@@ -320,7 +321,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
 {
        struct smcd_dev *smcd_dev;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
                             IB_DEVICE_NAME_MAX - 1))
@@ -328,7 +329,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
        }
        smcd_dev = NULL;
 out:
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return smcd_dev;
 }
 
@@ -825,7 +826,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
        int i;
 
        ini->ib_dev = NULL;
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (ibdev == known_dev)
                        continue;
@@ -844,7 +845,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
                }
        }
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* find alternate roce device with same pnet_id and vlan_id */
@@ -863,7 +864,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                struct net_device *ndev;
                int i;
@@ -888,7 +889,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* Determine the corresponding IB device port based on the hardware PNETID.
@@ -924,7 +925,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
            smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
                return; /* pnetid could not be determined */
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
                if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
                    !ismdev->going_away) {
@@ -932,7 +933,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
                        break;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* PNET table analysis for a given sock:
index 7239ba9..1e23cdd 100644 (file)
@@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
        *idx = link->wr_tx_cnt;
+       if (!smc_link_usable(link))
+               return -ENOLINK;
        for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                if (!test_and_set_bit(*idx, link->wr_tx_mask))
                        return 0;
@@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk)
 {
        struct ib_device *ibdev;
 
+       if (!lnk->smcibdev)
+               return;
+       ibdev = lnk->smcibdev->ibdev;
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
 
-       if (!lnk->smcibdev)
-               return;
-       ibdev = lnk->smcibdev->ibdev;
-
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                                    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
index 39e14d5..e9d0953 100644 (file)
@@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
        q.len = strlen(gssd_dummy_clnt_dir[0].name);
        clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
        if (!clnt_dentry) {
+               __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
                pipe_dentry = ERR_PTR(-ENOENT);
                goto out;
        }
index 5c4ec93..c537272 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/tcp.h>
 #include <net/tcp_states.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/ioctls.h>
 
 #include <linux/sunrpc/types.h>
index 6f7d82f..be11d67 100644 (file)
@@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->head[0].iov_len;
+               subbuf->head[0].iov_base = buf->head[0].iov_base;
                subbuf->head[0].iov_len = 0;
        }
 
@@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->page_len;
+               subbuf->pages = buf->pages;
+               subbuf->page_base = 0;
                subbuf->page_len = 0;
        }
 
@@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->tail[0].iov_len;
+               subbuf->tail[0].iov_base = buf->tail[0].iov_base;
                subbuf->tail[0].iov_len = 0;
        }
 
index ef99788..b647562 100644 (file)
@@ -367,7 +367,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
        trace_xprtrdma_wc_fastreg(wc, frwr);
        /* The MR will get recycled when the associated req is retransmitted */
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -452,7 +452,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
        trace_xprtrdma_wc_li(wc, frwr);
        __frwr_release_mr(wc, mr);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -474,7 +474,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
        __frwr_release_mr(wc, mr);
        complete(&frwr->fr_linv_done);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -582,7 +582,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
        smp_rmb();
        rpcrdma_complete_rqst(rep);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
index 2081c8f..453bacc 100644 (file)
@@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
        size = RPCRDMA_HDRLEN_MIN;
 
        /* Maximum Read list size */
-       size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
+       size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
 
        /* Minimal Read chunk size */
        size += sizeof(__be32); /* segment count */
@@ -94,7 +94,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
        size = RPCRDMA_HDRLEN_MIN;
 
        /* Maximum Write list size */
-       size = sizeof(__be32);          /* segment count */
+       size += sizeof(__be32);         /* segment count */
        size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */
 
@@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                        be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
        }
 
-       r_xprt->rx_stats.bad_reply_count++;
-       return -EREMOTEIO;
+       return -EIO;
 }
 
 /* Perform XID lookup, reconstruction of the RPC reply, and
@@ -1387,13 +1386,11 @@ out:
        spin_unlock(&xprt->queue_lock);
        return;
 
-/* If the incoming reply terminated a pending RPC, the next
- * RPC call will post a replacement receive buffer as it is
- * being marshaled.
- */
 out_badheader:
        trace_xprtrdma_reply_hdr(rep);
        r_xprt->rx_stats.bad_reply_count++;
+       rqst->rq_task->tk_status = status;
+       status = 0;
        goto out;
 }
 
index 0c4af7f..053c8ab 100644 (file)
@@ -242,13 +242,18 @@ xprt_rdma_connect_worker(struct work_struct *work)
 
        rc = rpcrdma_xprt_connect(r_xprt);
        xprt_clear_connecting(xprt);
-       if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
+       if (!rc) {
                xprt->connect_cookie++;
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies -
                                           xprt->stat.connect_start;
                xprt_set_connected(xprt);
                rc = -EAGAIN;
+       } else {
+               /* Force a call to xprt_rdma_close to clean up */
+               spin_lock(&xprt->transport_lock);
+               set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+               spin_unlock(&xprt->transport_lock);
        }
        xprt_wake_pending_tasks(xprt, rc);
 }
index 2ae3483..75c6467 100644 (file)
@@ -84,7 +84,8 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
 static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep);
+static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
+static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
 static struct rpcrdma_regbuf *
 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
@@ -97,7 +98,8 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
  */
 static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 {
-       struct rdma_cm_id *id = r_xprt->rx_ep->re_id;
+       struct rpcrdma_ep *ep = r_xprt->rx_ep;
+       struct rdma_cm_id *id = ep->re_id;
 
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
@@ -108,6 +110,8 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
         * local invalidations.
         */
        ib_drain_sq(id->qp);
+
+       rpcrdma_ep_put(ep);
 }
 
 /**
@@ -126,23 +130,27 @@ static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
        trace_xprtrdma_qp_event(ep, event);
 }
 
+/* Ensure xprt_force_disconnect() is invoked exactly once when a
+ * connection is closed or lost. (The important thing is it needs
+ * to be invoked "at least" once).
+ */
+static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
+{
+       if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
+               xprt_force_disconnect(ep->re_xprt);
+}
+
 /**
  * rpcrdma_flush_disconnect - Disconnect on flushed completion
- * @cq: completion queue
+ * @r_xprt: transport to disconnect
  * @wc: work completion entry
  *
  * Must be called in process context.
  */
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
 {
-       struct rpcrdma_xprt *r_xprt = cq->cq_context;
-       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-       if (wc->status != IB_WC_SUCCESS &&
-           r_xprt->rx_ep->re_connect_status == 1) {
-               r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
-               xprt_force_disconnect(xprt);
-       }
+       if (wc->status != IB_WC_SUCCESS)
+               rpcrdma_force_disconnect(r_xprt->rx_ep);
 }
 
 /**
@@ -156,11 +164,12 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
+       struct rpcrdma_xprt *r_xprt = cq->cq_context;
 
        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
-       rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_sendctx_put_locked(r_xprt, sc);
+       rpcrdma_flush_disconnect(r_xprt, wc);
 }
 
 /**
@@ -195,7 +204,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        return;
 
 out_flushed:
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(r_xprt, wc);
        rpcrdma_rep_destroy(rep);
 }
 
@@ -239,7 +248,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
        struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
        struct rpcrdma_ep *ep = id->context;
-       struct rpc_xprt *xprt = ep->re_xprt;
 
        might_sleep();
 
@@ -263,10 +271,9 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                /* fall through */
        case RDMA_CM_EVENT_ADDR_CHANGE:
                ep->re_connect_status = -ENODEV;
-               xprt_force_disconnect(xprt);
                goto disconnected;
        case RDMA_CM_EVENT_ESTABLISHED:
-               kref_get(&ep->re_kref);
+               rpcrdma_ep_get(ep);
                ep->re_connect_status = 1;
                rpcrdma_update_cm_private(ep, &event->param.conn);
                trace_xprtrdma_inline_thresh(ep);
@@ -274,22 +281,24 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->re_connect_status = -ENOTCONN;
-               goto disconnected;
+               goto wake_connect_worker;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->re_connect_status = -ENETUNREACH;
-               goto disconnected;
+               goto wake_connect_worker;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
                        sap, rdma_reject_msg(id, event->status));
                ep->re_connect_status = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
-                       ep->re_connect_status = -EAGAIN;
-               goto disconnected;
+                       ep->re_connect_status = -ENOTCONN;
+wake_connect_worker:
+               wake_up_all(&ep->re_connect_wait);
+               return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
 disconnected:
-               xprt_force_disconnect(xprt);
-               return rpcrdma_ep_destroy(ep);
+               rpcrdma_force_disconnect(ep);
+               return rpcrdma_ep_put(ep);
        default:
                break;
        }
@@ -345,7 +354,7 @@ out:
        return ERR_PTR(rc);
 }
 
-static void rpcrdma_ep_put(struct kref *kref)
+static void rpcrdma_ep_destroy(struct kref *kref)
 {
        struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);
 
@@ -369,13 +378,18 @@ static void rpcrdma_ep_put(struct kref *kref)
        module_put(THIS_MODULE);
 }
 
+static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
+{
+       kref_get(&ep->re_kref);
+}
+
 /* Returns:
  *     %0 if @ep still has a positive kref count, or
  *     %1 if @ep was destroyed successfully.
  */
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep)
+static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
 {
-       return kref_put(&ep->re_kref, rpcrdma_ep_put);
+       return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
 }
 
 static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
@@ -388,14 +402,14 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 
        ep = kzalloc(sizeof(*ep), GFP_NOFS);
        if (!ep)
-               return -EAGAIN;
+               return -ENOTCONN;
        ep->re_xprt = &r_xprt->rx_xprt;
        kref_init(&ep->re_kref);
 
        id = rpcrdma_create_id(r_xprt, ep);
        if (IS_ERR(id)) {
-               rc = PTR_ERR(id);
-               goto out_free;
+               kfree(ep);
+               return PTR_ERR(id);
        }
        __module_get(THIS_MODULE);
        device = id->device;
@@ -492,11 +506,8 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
        return 0;
 
 out_destroy:
-       rpcrdma_ep_destroy(ep);
+       rpcrdma_ep_put(ep);
        rdma_destroy_id(id);
-out_free:
-       kfree(ep);
-       r_xprt->rx_ep = NULL;
        return rc;
 }
 
@@ -512,22 +523,19 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
        struct rpcrdma_ep *ep;
        int rc;
 
-retry:
-       rpcrdma_xprt_disconnect(r_xprt);
        rc = rpcrdma_ep_create(r_xprt);
        if (rc)
                return rc;
        ep = r_xprt->rx_ep;
 
-       ep->re_connect_status = 0;
        xprt_clear_connected(xprt);
-
        rpcrdma_reset_cwnd(r_xprt);
-       rpcrdma_post_recvs(r_xprt, true);
 
-       rc = rpcrdma_sendctxs_create(r_xprt);
-       if (rc)
-               goto out;
+       /* Bump the ep's reference count while there are
+        * outstanding Receives.
+        */
+       rpcrdma_ep_get(ep);
+       rpcrdma_post_recvs(r_xprt, true);
 
        rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
        if (rc)
@@ -538,22 +546,24 @@ retry:
        wait_event_interruptible(ep->re_connect_wait,
                                 ep->re_connect_status != 0);
        if (ep->re_connect_status <= 0) {
-               if (ep->re_connect_status == -EAGAIN)
-                       goto retry;
                rc = ep->re_connect_status;
                goto out;
        }
 
+       rc = rpcrdma_sendctxs_create(r_xprt);
+       if (rc) {
+               rc = -ENOTCONN;
+               goto out;
+       }
+
        rc = rpcrdma_reqs_setup(r_xprt);
        if (rc) {
-               rpcrdma_xprt_disconnect(r_xprt);
+               rc = -ENOTCONN;
                goto out;
        }
        rpcrdma_mrs_create(r_xprt);
 
 out:
-       if (rc)
-               ep->re_connect_status = rc;
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
 }
@@ -587,7 +597,7 @@ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
        rpcrdma_mrs_destroy(r_xprt);
        rpcrdma_sendctxs_destroy(r_xprt);
 
-       if (rpcrdma_ep_destroy(ep))
+       if (rpcrdma_ep_put(ep))
                rdma_destroy_id(id);
 
        r_xprt->rx_ep = NULL;
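
rpcrdma_force_disconnect() turns the new ep->re_force_disconnect counter into a one-shot latch: atomic_add_unless(..., 1, 1) succeeds only for the first caller, so xprt_force_disconnect() runs once per connection even when several flushed completions race. A userspace analogue of that latch built on a compare-and-swap; the names here are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int force_disconnect_latch;       /* plays ep->re_force_disconnect */

static void force_disconnect(void)
{
        puts("disconnecting transport");        /* stand-in for xprt_force_disconnect() */
}

static void flush_disconnect(bool wc_failed)
{
        int expected = 0;

        /* Only the first failed completion flips the latch 0 -> 1. */
        if (wc_failed &&
            atomic_compare_exchange_strong(&force_disconnect_latch,
                                           &expected, 1))
                force_disconnect();
}

int main(void)
{
        flush_disconnect(true);         /* disconnects */
        flush_disconnect(true);         /* already latched: no-op */
        return 0;
}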
index 0a16fdb..43974ef 100644 (file)
@@ -82,6 +82,7 @@ struct rpcrdma_ep {
        unsigned int            re_max_inline_recv;
        int                     re_async_rc;
        int                     re_connect_status;
+       atomic_t                re_force_disconnect;
        struct ib_qp_init_attr  re_attr;
        wait_queue_head_t       re_connect_wait;
        struct rpc_xprt         *re_xprt;
@@ -446,7 +447,7 @@ extern unsigned int xprt_rdma_memreg_strategy;
 /*
  * Endpoint calls - xprtrdma/verbs.c
  */
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc);
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
 int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
 
index ee3b8d0..263d950 100644 (file)
@@ -921,6 +921,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 }
 
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ *                                     the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+                                             struct tipc_link *l)
+{
+       if (link_is_bc_sndlink(l))
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+       else
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
@@ -1036,9 +1051,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                                return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
-                       /* next retransmit attempt */
-                       if (link_is_bc_sndlink(l))
-                               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
@@ -1139,9 +1152,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                if (unlikely(skb == l->backlog[imp].target_bskb))
                        l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
-               /* next retransmit attempt */
-               if (link_is_bc_sndlink(l))
-                       TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+               tipc_link_set_skb_retransmit_time(skb, l);
 
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
@@ -1584,8 +1595,7 @@ release:
                        /* retransmit skb if unrestricted */
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
-                       TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
-                                       TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        _skb = pskb_copy(skb, GFP_ATOMIC);
                        if (!_skb)
                                continue;
index 263ae39..0e07fb8 100644 (file)
@@ -5016,7 +5016,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                err = nl80211_parse_he_obss_pd(
                                        info->attrs[NL80211_ATTR_HE_OBSS_PD],
                                        &params.he_obss_pd);
-               goto out;
+               if (err)
+                       goto out;
        }
 
        if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
@@ -5024,7 +5025,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                                        info->attrs[NL80211_ATTR_HE_BSS_COLOR],
                                        &params.he_bss_color);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
index 540ed75..08b8066 100644 (file)
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
        pool->free_heads_cnt = chunks;
        pool->headroom = headroom;
        pool->chunk_size = chunk_size;
-       pool->cheap_dma = true;
        pool->unaligned = unaligned;
        pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
        INIT_LIST_HEAD(&pool->free_list);
@@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
        }
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-       phys_addr_t paddr;
-       u32 i;
-
-       for (i = 0; i < pool->dma_pages_cnt; i++) {
-               paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-               if (is_swiotlb_buffer(paddr))
-                       return false;
-       }
-#endif
-       return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-       const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-       if (ops) {
-               return !ops->sync_single_for_cpu &&
-                       !ops->sync_single_for_device;
-       }
-
-       if (!dma_is_direct(ops))
-               return false;
-
-       if (!xp_check_swiotlb_dma(pool))
-               return false;
-
-       if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) ||               \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) ||        \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-               return false;
-#endif
-       }
-#endif
-       return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
        pool->dev = dev;
        pool->dma_pages_cnt = nr_pages;
+       pool->dma_need_sync = false;
 
        for (i = 0; i < pool->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                        xp_dma_unmap(pool, attrs);
                        return -ENOMEM;
                }
+               if (dma_need_sync(dev, dma))
+                       pool->dma_need_sync = true;
                pool->dma_pages[i] = dma;
        }
 
        if (pool->unaligned)
                xp_check_dma_contiguity(pool);
-
-       pool->dev = dev;
-       pool->cheap_dma = xp_check_cheap_dma(pool);
        return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
@@ -280,7 +234,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
 
-       if (!pool->cheap_dma) {
+       if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
index b5d4a1e..5b9a5ab 100644 (file)
@@ -67,6 +67,30 @@ config XFRM_STATISTICS
 
          If unsure, say N.
 
+# This option selects XFRM_ALGO along with the AH authentication algorithms that
+# RFC 8221 lists as MUST be implemented.
+config XFRM_AH
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_HMAC
+       select CRYPTO_SHA256
+
+# This option selects XFRM_ALGO along with the ESP encryption and authentication
+# algorithms that RFC 8221 lists as MUST be implemented.
+config XFRM_ESP
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_AES
+       select CRYPTO_AUTHENC
+       select CRYPTO_CBC
+       select CRYPTO_ECHAINIV
+       select CRYPTO_GCM
+       select CRYPTO_HMAC
+       select CRYPTO_SEQIV
+       select CRYPTO_SHA256
+
 config XFRM_IPCOMP
        tristate
        select XFRM_ALGO
index f50d1f9..626096b 100644 (file)
@@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;
 
-       if (!xo)
+       if (!xo || (xo->flags & XFRM_XMIT))
                return skb;
 
        if (!(features & NETIF_F_HW_ESP))
@@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
+       xo->flags |= XFRM_XMIT;
+
        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;
 
index c407ecb..b615729 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
@@ -581,6 +582,7 @@ static const struct net_device_ops xfrmi_netdev_ops = {
 static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_NONE;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
index e4c23f6..a7ab193 100644 (file)
@@ -574,16 +574,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
        switch (x->outer_mode.family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
                break;
        case AF_INET6:
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 
-#ifdef CONFIG_NETFILTER
                IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
                break;
        }
 
index dd558cb..ef53b93 100644 (file)
@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        void *array;
-       size_t size;
 
-       size = record_size * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, record_size);
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
        int i;
 
        /* Alloc main stats_record structure */
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(*rec));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
index f346816..f4e755e 100644 (file)
@@ -207,11 +207,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -226,11 +223,11 @@ static struct stats_record *alloc_stats_record(void)
 
        size = sizeof(*rec) + n_cpus * sizeof(struct record);
        rec = malloc(size);
-       memset(rec, 0, size);
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
        }
+       memset(rec, 0, size);
        rec->rx_cnt.cpu    = alloc_record_per_cpu();
        rec->redir_err.cpu = alloc_record_per_cpu();
        rec->kthread.cpu   = alloc_record_per_cpu();
index 4fe4750..caa4e7f 100644 (file)
@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
 {
        unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
        struct record *array;
-       size_t size;
 
-       size = sizeof(struct record) * nr_rxqs;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_rxqs, sizeof(struct record));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
                exit(EXIT_FAIL_MEM);
@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
        struct stats_record *rec;
        int i;
 
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(struct stats_record));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
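
The three sample conversions above share one pattern: replace the malloc()+memset() pair with calloc(), which zeroes the block and overflow-checks the element-count multiplication, and only touch the memory after the NULL check. A minimal, self-contained sketch of that pattern (the struct and helper names here are illustrative, not taken from the samples):

	#include <stdio.h>
	#include <stdlib.h>

	struct datarec { unsigned long processed; unsigned long dropped; };

	/* Allocate one zeroed record per CPU; calloc() does the zeroing and
	 * guards the nr_cpus * sizeof(*array) multiplication against overflow. */
	static struct datarec *alloc_zeroed_per_cpu(unsigned int nr_cpus)
	{
		struct datarec *array = calloc(nr_cpus, sizeof(*array));

		if (!array) {
			fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
			exit(EXIT_FAILURE);
		}
		return array;
	}
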
index 76c577e..49c7a46 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/fcntl.h>
 #define statx foo
 #define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
 #include <sys/stat.h>
 #undef statx
 #undef statx_timestamp
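
The added forward declarations complete the rename trick used here: while <sys/stat.h> is parsed, statx and statx_timestamp are spelled foo and foo_timestamp, and pre-declaring the structs keeps any glibc prototype that mentions them from introducing the renamed type inside a parameter list. A reduced sketch of the include order, assuming the same uapi/glibc mix as the sample (illustrative only):

	#define statx foo                     /* hide glibc's struct statx ...    */
	#define statx_timestamp foo_timestamp /* ... and its timestamp type       */
	struct statx;                         /* pre-declare the renamed structs  */
	struct statx_timestamp;
	#include <sys/stat.h>
	#undef statx
	#undef statx_timestamp

	#include <linux/stat.h>               /* only the uapi struct statx remains visible */
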
index 4aea7cf..62c2756 100644 (file)
@@ -35,6 +35,7 @@ KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 KBUILD_CFLAGS += -Wno-missing-field-initializers
 KBUILD_CFLAGS += -Wno-sign-compare
+KBUILD_CFLAGS += -Wno-type-limits
 
 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
 
@@ -66,6 +67,7 @@ KBUILD_CFLAGS += -Wshadow
 KBUILD_CFLAGS += $(call cc-option, -Wlogical-op)
 KBUILD_CFLAGS += -Wmissing-field-initializers
 KBUILD_CFLAGS += -Wsign-compare
+KBUILD_CFLAGS += -Wtype-limits
 KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
 KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
 
index 99ac59c..916b2f7 100644 (file)
@@ -212,6 +212,9 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cp $< $@
+
 # Shipped files
 # ===========================================================================
 
@@ -259,6 +262,7 @@ quiet_cmd_gzip = GZIP    $@
 # DTC
 # ---------------------------------------------------------------------------
 DTC ?= $(objtree)/scripts/dtc/dtc
+DTC_FLAGS += -Wno-interrupt_provider
 
 # Disable noisy checks by default
 ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
@@ -274,7 +278,8 @@ endif
 
 ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
 DTC_FLAGS += -Wnode_name_chars_strict \
-       -Wproperty_name_chars_strict
+       -Wproperty_name_chars_strict \
+       -Winterrupt_provider
 endif
 
 DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
index 0fd1cf0..693dfa1 100755 (executable)
@@ -58,6 +58,21 @@ cat << EOF
 EOF
 }
 
+gen_proto_order_variant()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+       local arch="$1"
+       local atomic="$2"
+
+       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+
+       printf "#define arch_${basename}${order} ${basename}${order}\n"
+}
+
 #gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
 gen_proto_order_variants()
 {
@@ -72,6 +87,22 @@ gen_proto_order_variants()
 
        local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
 
+       if [ -z "$arch" ]; then
+               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+               if meta_has_acquire "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+               fi
+               if meta_has_release "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+               fi
+               if meta_has_relaxed "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+               fi
+
+               echo ""
+       fi
+
        # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
        # read_acquire and set_release need to be templated, though
        if ! meta_has_relaxed "${meta}"; then
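
For the no-arch_ case handled above, gen_proto_order_variant() only emits alias defines; the generated header ends up with blocks like the following (a hypothetical excerpt for one operation, shown only to illustrate the shape of the script's output):

	/* Generic atomic names aliased to their arch_-prefixed spellings. */
	#define arch_atomic_add_return          atomic_add_return
	#define arch_atomic_add_return_acquire  atomic_add_return_acquire
	#define arch_atomic_add_return_release  atomic_add_return_release
	#define arch_atomic_add_return_relaxed  atomic_add_return_relaxed
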
index 4b3c486..b7955db 100644 (file)
@@ -1022,6 +1022,9 @@ static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct no
 }
 WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells);
 
+#define I2C_OWN_SLAVE_ADDRESS  (1U << 30)
+#define I2C_TEN_BIT_ADDRESS    (1U << 31)
+
 static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
 {
        struct property *prop;
@@ -1044,6 +1047,8 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
        }
 
        reg = fdt32_to_cpu(*cells);
+       /* Ignore I2C_OWN_SLAVE_ADDRESS */
+       reg &= ~I2C_OWN_SLAVE_ADDRESS;
        snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
        if (!streq(unitname, unit_addr))
                FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
@@ -1051,10 +1056,15 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
 
        for (len = prop->val.len; len > 0; len -= 4) {
                reg = fdt32_to_cpu(*(cells++));
-               if (reg > 0x3ff)
+               /* Ignore I2C_OWN_SLAVE_ADDRESS */
+               reg &= ~I2C_OWN_SLAVE_ADDRESS;
+
+               if ((reg & I2C_TEN_BIT_ADDRESS) && ((reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff))
                        FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
                                  reg);
-
+               else if (reg > 0x7f)
+                       FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property",
+                                 reg);
        }
 }
 WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge);
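
The reworked loop above encodes a simple rule: bit 31 (I2C_TEN_BIT_ADDRESS) marks a 10-bit address, bit 30 (I2C_OWN_SLAVE_ADDRESS) is ignored for range checking, and everything else must fit in 7 bits. A standalone sketch of that rule (not dtc code, just the check restated in isolation):

	#include <stdbool.h>
	#include <stdint.h>

	#define I2C_OWN_SLAVE_ADDRESS  (1U << 30)
	#define I2C_TEN_BIT_ADDRESS    (1U << 31)

	static bool i2c_reg_addr_ok(uint32_t reg)
	{
		reg &= ~I2C_OWN_SLAVE_ADDRESS;  /* own-address flag is not part of the value */

		if (reg & I2C_TEN_BIT_ADDRESS)
			return (reg & ~I2C_TEN_BIT_ADDRESS) <= 0x3ff;  /* 10-bit range */

		return reg <= 0x7f;             /* plain 7-bit address */
	}
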
@@ -1547,6 +1557,28 @@ static bool node_is_interrupt_provider(struct node *node)
 
        return false;
 }
+
+static void check_interrupt_provider(struct check *c,
+                                    struct dt_info *dti,
+                                    struct node *node)
+{
+       struct property *prop;
+
+       if (!node_is_interrupt_provider(node))
+               return;
+
+       prop = get_property(node, "#interrupt-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #interrupt-cells in interrupt provider");
+
+       prop = get_property(node, "#address-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #address-cells in interrupt provider");
+}
+WARNING(interrupt_provider, check_interrupt_provider, NULL);
+
 static void check_interrupts_property(struct check *c,
                                      struct dt_info *dti,
                                      struct node *node)
@@ -1604,7 +1636,7 @@ static void check_interrupts_property(struct check *c,
 
        prop = get_property(irq_node, "#interrupt-cells");
        if (!prop) {
-               FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent");
+               /* We warn about that already in another test. */
                return;
        }
 
@@ -1828,6 +1860,7 @@ static struct check *check_table[] = {
        &deprecated_gpio_property,
        &gpios_property,
        &interrupts_property,
+       &interrupt_provider,
 
        &alias_paths,
 
index 6e74ece..a08f415 100644 (file)
@@ -51,6 +51,37 @@ extern int annotate;         /* annotate .dts with input source location */
 
 typedef uint32_t cell_t;
 
+static inline uint16_t dtb_ld16(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint16_t)bp[0] << 8)
+               | bp[1];
+}
+
+static inline uint32_t dtb_ld32(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint32_t)bp[0] << 24)
+               | ((uint32_t)bp[1] << 16)
+               | ((uint32_t)bp[2] << 8)
+               | bp[3];
+}
+
+static inline uint64_t dtb_ld64(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint64_t)bp[0] << 56)
+               | ((uint64_t)bp[1] << 48)
+               | ((uint64_t)bp[2] << 40)
+               | ((uint64_t)bp[3] << 32)
+               | ((uint64_t)bp[4] << 24)
+               | ((uint64_t)bp[5] << 16)
+               | ((uint64_t)bp[6] << 8)
+               | bp[7];
+}
 
 #define streq(a, b)    (strcmp((a), (b)) == 0)
 #define strstarts(s, prefix)   (strncmp((s), (prefix), strlen(prefix)) == 0)
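
The dtb_ldNN() helpers above assemble big-endian device-tree values one byte at a time, so they neither assume host endianness nor require the pointer to be aligned. A small hedged usage sketch (the property bytes below are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Same byte-at-a-time big-endian load as dtb_ld32() above. */
	static uint32_t ld32_be(const void *p)
	{
		const uint8_t *bp = p;

		return ((uint32_t)bp[0] << 24) | ((uint32_t)bp[1] << 16) |
		       ((uint32_t)bp[2] << 8)  | bp[3];
	}

	int main(void)
	{
		/* A one-cell property value of 1, stored big-endian as in a dtb. */
		const uint8_t prop[4] = { 0x00, 0x00, 0x00, 0x01 };

		printf("cell = 0x%x\n", ld32_be(prop));   /* prints cell = 0x1 */
		return 0;
	}
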
index bd6977e..07f10d2 100644 (file)
@@ -156,7 +156,7 @@ static void asm_emit_data(void *e, struct data d)
                emit_offset_label(f, m->ref, m->offset);
 
        while ((d.len - off) >= sizeof(uint32_t)) {
-               asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
+               asm_emit_cell(e, dtb_ld32(d.val + off));
                off += sizeof(uint32_t);
        }
 
index 524b520..93e4a2b 100644 (file)
@@ -436,7 +436,7 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
                        return struct_size;
        }
 
-       if (can_assume(LIBFDT_ORDER) |
+       if (can_assume(LIBFDT_ORDER) ||
            !fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
                /* no further work necessary */
                err = fdt_move(fdt, buf, bufsize);
index 26759d5..94ce4bb 100644 (file)
@@ -32,7 +32,7 @@ static int fdt_sw_probe_(void *fdt)
 /* 'memrsv' state:     Initial state after fdt_create()
  *
  * Allowed functions:
- *     fdt_add_reservmap_entry()
+ *     fdt_add_reservemap_entry()
  *     fdt_finish_reservemap()         [moves to 'struct' state]
  */
 static int fdt_sw_probe_memrsv_(void *fdt)
index 36fadcd..fe49b5d 100644 (file)
@@ -9,6 +9,10 @@
 #include "libfdt_env.h"
 #include "fdt.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #define FDT_FIRST_SUPPORTED_VERSION    0x02
 #define FDT_LAST_SUPPORTED_VERSION     0x11
 
@@ -2069,4 +2073,8 @@ int fdt_overlay_apply(void *fdt, void *fdto);
 
 const char *fdt_strerror(int errval);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LIBFDT_H */
index c9d980c..061ba8c 100644 (file)
@@ -110,13 +110,13 @@ static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
                        fprintf(f, "%02"PRIx8, *(const uint8_t*)p);
                        break;
                case 2:
-                       fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
+                       fprintf(f, "0x%02"PRIx16, dtb_ld16(p));
                        break;
                case 4:
-                       fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
+                       fprintf(f, "0x%02"PRIx32, dtb_ld32(p));
                        break;
                case 8:
-                       fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
+                       fprintf(f, "0x%02"PRIx64, dtb_ld64(p));
                        break;
                }
                if (p + width < end)
@@ -183,7 +183,7 @@ static enum markertype guess_value_type(struct property *prop)
                        nnotcelllbl++;
        }
 
-       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
+       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul <= (len-nnul))
            && (nnotstringlbl == 0)) {
                return TYPE_STRING;
        } else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
index 61dd711..0714799 100644 (file)
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.6.0-g87a656ae"
+#define DTC_VERSION "DTC 1.6.0-g9d7888cb"
index 5b6ea8e..4e93c12 100644 (file)
@@ -59,10 +59,10 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off));
                        break;
                case 2:
-                       sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx16, dtb_ld16(data + off));
                        break;
                case 4:
-                       sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx32, dtb_ld32(data + off));
                        m = markers;
                        is_phandle = false;
                        for_each_marker_of_type(m, REF_PHANDLE) {
@@ -73,7 +73,7 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        }
                        break;
                case 8:
-                       sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx64, dtb_ld64(data + off));
                        break;
                }
 
index ce0b99f..ae19fb0 100644 (file)
@@ -78,7 +78,7 @@ config GCC_PLUGIN_RANDSTRUCT
          source tree isn't cleaned after kernel installation).
 
          The seed used for compilation is located at
-         scripts/gcc-plgins/randomize_layout_seed.h.  It remains after
+         scripts/gcc-plugins/randomize_layout_seed.h.  It remains after
          a make clean to allow for external modules to be compiled with
          the existing seed and will be removed by a make mrproper or
          make distclean.
index c0ac8f7..4a61612 100644 (file)
@@ -4,27 +4,19 @@
  * Copyright (C) 2015 Boris Barbulovski <bbarbulovski@gmail.com>
  */
 
-#include <qglobal.h>
-
-#include <QMainWindow>
-#include <QList>
-#include <qtextbrowser.h>
 #include <QAction>
+#include <QApplication>
+#include <QCloseEvent>
+#include <QDebug>
+#include <QDesktopWidget>
 #include <QFileDialog>
+#include <QLabel>
+#include <QLayout>
+#include <QList>
 #include <QMenu>
-
-#include <qapplication.h>
-#include <qdesktopwidget.h>
-#include <qtoolbar.h>
-#include <qlayout.h>
-#include <qsplitter.h>
-#include <qlineedit.h>
-#include <qlabel.h>
-#include <qpushbutton.h>
-#include <qmenubar.h>
-#include <qmessagebox.h>
-#include <qregexp.h>
-#include <qevent.h>
+#include <QMenuBar>
+#include <QMessageBox>
+#include <QToolBar>
 
 #include <stdlib.h>
 
@@ -445,9 +437,10 @@ void ConfigList::updateList(ConfigItem* item)
        if (rootEntry != &rootmenu && (mode == singleMode ||
            (mode == symbolMode && rootEntry->parent != &rootmenu))) {
                item = (ConfigItem *)topLevelItem(0);
-               if (!item)
+               if (!item && mode != symbolMode) {
                        item = new ConfigItem(this, 0, true);
-               last = item;
+                       last = item;
+               }
        }
        if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
            rootEntry->sym && rootEntry->prompt) {
@@ -545,7 +538,7 @@ void ConfigList::setRootMenu(struct menu *menu)
        rootEntry = menu;
        updateListAll();
        if (currentItem()) {
-               currentItem()->setSelected(hasFocus());
+               setSelected(currentItem(), hasFocus());
                scrollToItem(currentItem());
        }
 }
@@ -873,7 +866,7 @@ void ConfigList::focusInEvent(QFocusEvent *e)
 
        ConfigItem* item = (ConfigItem *)currentItem();
        if (item) {
-               item->setSelected(true);
+               setSelected(item, true);
                menu = item->menu;
        }
        emit gotFocus(menu);
@@ -1021,7 +1014,7 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
        : Parent(parent), sym(0), _menu(0)
 {
        setObjectName(name);
-
+       setOpenLinks(false);
 
        if (!objectName().isEmpty()) {
                configSettings->beginGroup(objectName());
@@ -1094,7 +1087,7 @@ void ConfigInfoView::menuInfo(void)
                        if (sym->name) {
                                head += " (";
                                if (showDebug())
-                                       head += QString().sprintf("<a href=\"s%p\">", sym);
+                                       head += QString().sprintf("<a href=\"s%s\">", sym->name);
                                head += print_filter(sym->name);
                                if (showDebug())
                                        head += "</a>";
@@ -1103,7 +1096,7 @@ void ConfigInfoView::menuInfo(void)
                } else if (sym->name) {
                        head += "<big><b>";
                        if (showDebug())
-                               head += QString().sprintf("<a href=\"s%p\">", sym);
+                               head += QString().sprintf("<a href=\"s%s\">", sym->name);
                        head += print_filter(sym->name);
                        if (showDebug())
                                head += "</a>";
@@ -1154,13 +1147,16 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
                switch (prop->type) {
                case P_PROMPT:
                case P_MENU:
-                       debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+                       debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
                        debug += print_filter(prop->text);
                        debug += "</a><br>";
                        break;
                case P_DEFAULT:
                case P_SELECT:
                case P_RANGE:
+               case P_COMMENT:
+               case P_IMPLY:
+               case P_SYMBOL:
                        debug += prop_get_type_name(prop->type);
                        debug += ": ";
                        expr_print(prop->expr, expr_print_help, &debug, E_NONE);
@@ -1226,13 +1222,62 @@ void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char
        QString str2 = print_filter(str);
 
        if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
-               *text += QString().sprintf("<a href=\"s%p\">", sym);
+               *text += QString().sprintf("<a href=\"s%s\">", sym->name);
                *text += str2;
                *text += "</a>";
        } else
                *text += str2;
 }
 
+void ConfigInfoView::clicked(const QUrl &url)
+{
+       QByteArray str = url.toEncoded();
+       const std::size_t count = str.size();
+       char *data = new char[count + 1];
+       struct symbol **result;
+       struct menu *m = NULL;
+
+       if (count < 1) {
+               qInfo() << "Clicked link is empty";
+               delete data;
+               return;
+       }
+
+       memcpy(data, str.constData(), count);
+       data[count] = '\0';
+
+       /* Seek for exact match */
+       data[0] = '^';
+       strcat(data, "$");
+       result = sym_re_search(data);
+       if (!result) {
+               qInfo() << "Clicked symbol is invalid:" << data;
+               delete data;
+               return;
+       }
+
+       sym = *result;
+
+       /* Seek for the menu which holds the symbol */
+       for (struct property *prop = sym->prop; prop; prop = prop->next) {
+                   if (prop->type != P_PROMPT && prop->type != P_MENU)
+                           continue;
+                   m = prop->menu;
+                   break;
+       }
+
+       if (!m) {
+               /* Symbol is not visible as a menu */
+               symbolInfo();
+               emit showDebugChanged(true);
+       } else {
+               emit menuSelected(m);
+       }
+
+       free(result);
+       delete data;
+}
+
 QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
 {
        QMenu* popup = Parent::createStandardContextMenu(pos);
@@ -1402,18 +1447,22 @@ ConfigMainWindow::ConfigMainWindow(void)
        addToolBar(toolBar);
 
        backAction = new QAction(QPixmap(xpm_back), "Back", this);
-         connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
-         backAction->setEnabled(false);
+       connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
+
        QAction *quitAction = new QAction("&Quit", this);
        quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
-         connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+       connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+
        QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
        loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
-         connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+       connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+
        saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
        saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
-         connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+       connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+
        conf_set_changed_callback(conf_changed);
+
        // Set saveAction's initial state
        conf_changed();
        configname = xstrdup(conf_get_configname());
@@ -1506,6 +1555,9 @@ ConfigMainWindow::ConfigMainWindow(void)
        helpMenu->addAction(showIntroAction);
        helpMenu->addAction(showAboutAction);
 
+       connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
+                helpText, SLOT (clicked (const QUrl &)) );
+
        connect(configList, SIGNAL(menuChanged(struct menu *)),
                helpText, SLOT(setInfo(struct menu *)));
        connect(configList, SIGNAL(menuSelected(struct menu *)),
@@ -1611,21 +1663,11 @@ void ConfigMainWindow::searchConfig(void)
 void ConfigMainWindow::changeItens(struct menu *menu)
 {
        configList->setRootMenu(menu);
-
-       if (configList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::changeMenu(struct menu *menu)
 {
        menuList->setRootMenu(menu);
-
-       if (menuList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::setMenuLink(struct menu *menu)
@@ -1645,22 +1687,26 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
                        return;
                list->setRootMenu(parent);
                break;
-       case symbolMode:
+       case menuMode:
                if (menu->flags & MENU_ROOT) {
-                       configList->setRootMenu(menu);
+                       menuList->setRootMenu(menu);
                        configList->clearSelection();
-                       list = menuList;
-               } else {
                        list = configList;
+               } else {
                        parent = menu_get_parent_menu(menu->parent);
                        if (!parent)
                                return;
-                       item = menuList->findConfigItem(parent);
+
+                       /* Select the config view */
+                       item = configList->findConfigItem(parent);
                        if (item) {
-                               item->setSelected(true);
-                               menuList->scrollToItem(item);
+                               configList->setSelected(item, true);
+                               configList->scrollToItem(item);
                        }
-                       list->setRootMenu(parent);
+
+                       menuList->setRootMenu(parent);
+                       menuList->clearSelection();
+                       list = menuList;
                }
                break;
        case fullMode:
@@ -1673,9 +1719,10 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
        if (list) {
                item = list->findConfigItem(menu);
                if (item) {
-                       item->setSelected(true);
+                       list->setSelected(item, true);
                        list->scrollToItem(item);
                        list->setFocus();
+                       helpText->setInfo(menu);
                }
        }
 }
@@ -1688,25 +1735,11 @@ void ConfigMainWindow::listFocusChanged(void)
 
 void ConfigMainWindow::goBack(void)
 {
-       ConfigItem* item, *oldSelection;
-
-       configList->setParentMenu();
+qInfo() << __FUNCTION__;
        if (configList->rootEntry == &rootmenu)
-               backAction->setEnabled(false);
-
-       if (menuList->selectedItems().count() == 0)
                return;
 
-       item = (ConfigItem*)menuList->selectedItems().first();
-       oldSelection = item;
-       while (item) {
-               if (item->menu == configList->rootEntry) {
-                       oldSelection->setSelected(false);
-                       item->setSelected(true);
-                       break;
-               }
-               item = (ConfigItem*)item->parent();
-       }
+       configList->setParentMenu();
 }
 
 void ConfigMainWindow::showSingleView(void)
@@ -1718,6 +1751,8 @@ void ConfigMainWindow::showSingleView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(true);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = singleMode;
@@ -1737,6 +1772,8 @@ void ConfigMainWindow::showSplitView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(false);
+
        configList->mode = menuMode;
        if (configList->rootEntry == &rootmenu)
                configList->updateListAll();
@@ -1760,6 +1797,8 @@ void ConfigMainWindow::showFullView(void)
        fullViewAction->setEnabled(false);
        fullViewAction->setChecked(true);
 
+       backAction->setEnabled(false);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = fullMode;
index c879d79..fb9e972 100644 (file)
@@ -3,17 +3,17 @@
  * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
  */
 
-#include <QTextBrowser>
-#include <QTreeWidget>
-#include <QMainWindow>
+#include <QCheckBox>
+#include <QDialog>
 #include <QHeaderView>
-#include <qsettings.h>
+#include <QLineEdit>
+#include <QMainWindow>
 #include <QPushButton>
 #include <QSettings>
-#include <QLineEdit>
 #include <QSplitter>
-#include <QCheckBox>
-#include <QDialog>
+#include <QTextBrowser>
+#include <QTreeWidget>
+
 #include "expr.h"
 
 class ConfigView;
@@ -45,11 +45,17 @@ class ConfigList : public QTreeWidget {
 public:
        ConfigList(ConfigView* p, const char *name = 0);
        void reinit(void);
+       ConfigItem* findConfigItem(struct menu *);
        ConfigView* parent(void) const
        {
                return (ConfigView*)Parent::parent();
        }
-       ConfigItem* findConfigItem(struct menu *);
+       void setSelected(QTreeWidgetItem *item, bool enable) {
+               for (int i = 0; i < selectedItems().size(); i++)
+                       selectedItems().at(i)->setSelected(false);
+
+               item->setSelected(enable);
+       }
 
 protected:
        void keyPressEvent(QKeyEvent *e);
@@ -250,6 +256,7 @@ public slots:
        void setInfo(struct menu *menu);
        void saveSettings(void);
        void setShowDebug(bool);
+       void clicked (const QUrl &url);
 
 signals:
        void showDebugChanged(bool);
index e12c490..1d20003 100644 (file)
@@ -188,19 +188,7 @@ DEFINE_LSM(integrity) = {
 int integrity_kernel_read(struct file *file, loff_t offset,
                          void *addr, unsigned long count)
 {
-       mm_segment_t old_fs;
-       char __user *buf = (char __user *)addr;
-       ssize_t ret;
-
-       if (!(file->f_mode & FMODE_READ))
-               return -EBADF;
-
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = __vfs_read(file, buf, count, &offset);
-       set_fs(old_fs);
-
-       return ret;
+       return __kernel_read(file, addr, count, &offset);
 }
 
 /*
index df93ac2..9d94080 100644 (file)
@@ -30,7 +30,7 @@
 
 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
                     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
 
 /* digest size for IMA, fits SHA1 or MD5 */
 #define IMA_DIGEST_SIZE                SHA1_DIGEST_SIZE
index 220b149..011c3c7 100644 (file)
@@ -823,13 +823,26 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
        if (rc != 0)
                return rc;
 
-       /* cumulative sha1 over tpm registers 0-7 */
+       /* cumulative digest over TPM registers 0-7 */
        for (i = TPM_PCR0; i < TPM_PCR8; i++) {
                ima_pcrread(i, &d);
                /* now accumulate with current aggregate */
                rc = crypto_shash_update(shash, d.digest,
                                         crypto_shash_digestsize(tfm));
        }
+       /*
+        * Extend cumulative digest over TPM registers 8-9, which contain
+        * measurement for the kernel command line (reg. 8) and image (reg. 9)
+        * in a typical PCR allocation. Registers 8-9 are only included in
+        * non-SHA1 boot_aggregate digests to avoid ambiguity.
+        */
+       if (alg_id != TPM_ALG_SHA1) {
+               for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+                       ima_pcrread(i, &d);
+                       rc = crypto_shash_update(shash, d.digest,
+                                               crypto_shash_digestsize(tfm));
+               }
+       }
        if (!rc)
                crypto_shash_final(shash, digest);
        return rc;
index 0ce3e73..70a7ad3 100644 (file)
@@ -1414,7 +1414,22 @@ EXPORT_SYMBOL(security_inode_copy_up);
 
 int security_inode_copy_up_xattr(const char *name)
 {
-       return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * The implementation can return 0 (accept the xattr), 1 (discard the
+        * xattr), -EOPNOTSUPP if it does not know anything about the xattr, or
+        * any other error code in case of an error.
+        */
+       hlist_for_each_entry(hp,
+               &security_hook_heads.inode_copy_up_xattr, list) {
+               rc = hp->hook.inode_copy_up_xattr(name);
+               if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
+                       return rc;
+       }
+
+       return LSM_RET_DEFAULT(inode_copy_up_xattr);
 }
 EXPORT_SYMBOL(security_inode_copy_up_xattr);
 
index 509290f..0e53f6f 100644 (file)
@@ -764,6 +764,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
        if (!retval) {
+               /* clear flags and stop any drain wait */
+               stream->partial_drain = false;
+               stream->metadata_set = false;
                snd_compr_drain_notify(stream);
                stream->runtime->total_bytes_available = 0;
                stream->runtime->total_bytes_transferred = 0;
@@ -921,6 +924,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
        if (stream->next_track == false)
                return -EPERM;
 
+       stream->partial_drain = true;
        retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
        if (retval) {
                pr_debug("Partial drain returned failure\n");
index e69a4ef..08c10ac 100644 (file)
@@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
                {
                        struct snd_dm_fm_info info;
 
+                       memset(&info, 0, sizeof(info));
+
                        info.fm_mode = opl3->fm_mode;
                        info.rhythm = opl3->rhythm;
                        if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
index 20b8f6c..99aec73 100644 (file)
@@ -208,8 +208,8 @@ static const struct config_entry config_table[] = {
        },
 #endif
 
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 /* Cometlake-LP */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
        {
                .flags = FLAG_SOF,
                .device = 0x02c8,
@@ -240,9 +240,7 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
-#endif
 /* Cometlake-H */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
        {
                .flags = FLAG_SOF,
                .device = 0x06c8,
index 2c6d2be..824f4ac 100644 (file)
@@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp)
        if (a->type != b->type)
                return (int)(a->type - b->type);
 
+       /* If it has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */
+       if (a->is_headset_mic && b->is_headphone_mic)
+               return -1; /* don't swap */
+       else if (a->is_headphone_mic && b->is_headset_mic)
+               return 1; /* swap */
+
        /* In case one has boost and the other one has not,
           pick the one with boost first. */
        return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
index d20aedd..3565e2a 100644 (file)
@@ -2470,6 +2470,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Icelake-H */
+       { PCI_DEVICE(0x8086, 0x3dc8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Jasperlake */
        { PCI_DEVICE(0x8086, 0x38c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2478,9 +2481,14 @@ static const struct pci_device_id azx_ids[] = {
        /* Tigerlake */
        { PCI_DEVICE(0x8086, 0xa0c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake-H */
+       { PCI_DEVICE(0x8086, 0x43c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4b58),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
index fbd7cc6..41eaa89 100644 (file)
@@ -259,7 +259,7 @@ static int hinfo_to_pcm_index(struct hda_codec *codec,
                if (get_pcm_rec(spec, pcm_idx)->stream == hinfo)
                        return pcm_idx;
 
-       codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo);
        return -EINVAL;
 }
 
@@ -277,7 +277,8 @@ static int hinfo_to_pin_index(struct hda_codec *codec,
                        return pin_idx;
        }
 
-       codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo,
+                 hinfo_to_pcm_index(codec, hinfo));
        return -EINVAL;
 }
 
@@ -1804,33 +1805,43 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 
 static int hdmi_parse_codec(struct hda_codec *codec)
 {
-       hda_nid_t nid;
+       hda_nid_t start_nid;
+       unsigned int caps;
        int i, nodes;
 
-       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid);
-       if (!nid || nodes < 0) {
+       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
+       if (!start_nid || nodes < 0) {
                codec_warn(codec, "HDMI: failed to get afg sub nodes\n");
                return -EINVAL;
        }
 
-       for (i = 0; i < nodes; i++, nid++) {
-               unsigned int caps;
-               unsigned int type;
+       /*
+        * hdmi_add_pin() assumes the total number of converters to
+        * be known, so first discover all converters
+        */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
 
                caps = get_wcaps(codec, nid);
-               type = get_wcaps_type(caps);
 
                if (!(caps & AC_WCAP_DIGITAL))
                        continue;
 
-               switch (type) {
-               case AC_WID_AUD_OUT:
+               if (get_wcaps_type(caps) == AC_WID_AUD_OUT)
                        hdmi_add_cvt(codec, nid);
-                       break;
-               case AC_WID_PIN:
+       }
+
+       /* discover audio pins */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
+
+               caps = get_wcaps(codec, nid);
+
+               if (!(caps & AC_WCAP_DIGITAL))
+                       continue;
+
+               if (get_wcaps_type(caps) == AC_WID_PIN)
                        hdmi_add_pin(codec, nid);
-                       break;
-               }
        }
 
        return 0;
@@ -4145,6 +4156,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP",    patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",   patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",   patch_via_hdmi),
index 6d73f8b..1b06c42 100644 (file)
@@ -2461,6 +2461,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
@@ -6148,6 +6149,11 @@ enum {
        ALC236_FIXUP_HP_MUTE_LED,
        ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
        ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+       ALC269VC_FIXUP_ACER_HEADSET_MIC,
+       ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
+       ALC289_FIXUP_ASUS_G401,
+       ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7113,7 +7119,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
                .chained = true,
-               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
        [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
                .type = HDA_FIXUP_PINS,
@@ -7122,7 +7128,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
                .chained = true,
-               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
        [ALC294_FIXUP_ASUS_SPK] = {
                .type = HDA_FIXUP_VERBS,
@@ -7130,6 +7136,8 @@ static const struct hda_fixup alc269_fixups[] = {
                        /* Set EAPD high */
                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
                        { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
                        { }
                },
                .chained = true,
@@ -7326,6 +7334,51 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90100120 }, /* use as internal speaker */
+                       { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
+                       { 0x1a, 0x01011020 }, /* use as line out */
+                       { },
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x02a11030 }, /* use as headset mic */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC289_FIXUP_ASUS_G401] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+                       { }
+               },
+       },
+       [ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7341,16 +7394,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+       SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+       SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -7470,6 +7527,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
@@ -7492,6 +7551,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7501,6 +7561,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -7520,6 +7581,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+       SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
@@ -7568,8 +7630,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
-       SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
index d8f554f..e6386de 100644 (file)
@@ -342,11 +342,34 @@ static int acp3x_dma_close(struct snd_soc_component *component,
 {
        struct snd_soc_pcm_runtime *prtd;
        struct i2s_dev_data *adata;
+       struct i2s_stream_instance *ins;
 
        prtd = substream->private_data;
        component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
        adata = dev_get_drvdata(component->dev);
+       ins = substream->runtime->private_data;
+       if (!ins)
+               return -EINVAL;
 
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->play_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_play_stream = NULL;
+               }
+       } else {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->capture_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_capture_stream = NULL;
+               }
+       }
 
        /* Disable ACP irq, when the current stream is being closed and
         * another stream is also not active.
@@ -354,13 +377,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
        if (!adata->play_stream && !adata->capture_stream &&
                !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
                rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               adata->play_stream = NULL;
-               adata->i2ssp_play_stream = NULL;
-       } else {
-               adata->capture_stream = NULL;
-               adata->i2ssp_capture_stream = NULL;
-       }
        return 0;
 }
 
index e437193..4a82690 100644 (file)
@@ -2,6 +2,7 @@
 # Renoir platform Support
 snd-rn-pci-acp3x-objs  := rn-pci-acp3x.o
 snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-rn-pci-acp3x.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += acp3x-rn.o
+snd-acp3x-rn-objs      := acp3x-rn.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-rn-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += snd-acp3x-rn.o
index de003ac..473efe9 100644 (file)
@@ -441,13 +441,13 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
        if (ret < 0) {
                dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
-               goto error;
+               goto error_pm;
        }
 
        ret = snd_hdac_regmap_init(&hcodec->core);
        if (ret < 0) {
                dev_err(&hdev->dev, "regmap init failed\n");
-               goto error;
+               goto error_pm;
        }
 
        patch = (hda_codec_patch_t)hcodec->preset->driver_data;
@@ -455,7 +455,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                ret = patch(hcodec);
                if (ret < 0) {
                        dev_err(&hdev->dev, "patch failed %d\n", ret);
-                       goto error;
+                       goto error_regmap;
                }
        } else {
                dev_dbg(&hdev->dev, "no patch file found\n");
@@ -467,7 +467,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_parse_pcms(hcodec);
        if (ret < 0) {
                dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
-               goto error;
+               goto error_regmap;
        }
 
        /* HDMI controls need to be created in machine drivers */
@@ -476,7 +476,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                if (ret < 0) {
                        dev_err(&hdev->dev, "unable to create controls %d\n",
                                ret);
-                       goto error;
+                       goto error_regmap;
                }
        }
 
@@ -496,7 +496,9 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
 
        return 0;
 
-error:
+error_regmap:
+       snd_hdac_regmap_exit(hdev);
+error_pm:
        pm_runtime_put(&hdev->dev);
 error_no_pm:
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
@@ -518,6 +520,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
 
        pm_runtime_disable(&hdev->dev);
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+       snd_hdac_regmap_exit(hdev);
 }
 
 static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
index 0d63ebf..e6613b5 100644 (file)
@@ -700,8 +700,8 @@ static bool max98390_readable_register(struct device *dev, unsigned int reg)
        case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL:
        case MAX98390_MEAS_ADC_THERM_WARN_THRESH
                ... MAX98390_BROWNOUT_INFINITE_HOLD:
-       case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0:
-       case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID:
+       case MAX98390_BROWNOUT_LVL_HOLD ... DSMIG_DEBUZZER_THRESHOLD:
+       case DSM_VOL_ENA ... MAX98390_R24FF_REV_ID:
                return true;
        default:
                return false;
@@ -717,7 +717,7 @@ static bool max98390_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98390_BROWNOUT_LOWEST_STATUS:
        case MAX98390_ENV_TRACK_BOOST_VOUT_READ:
        case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2:
-       case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+       case THERMAL_RDC_RD_BACK_BYTE1 ... DSMIG_DEBUZZER_THRESHOLD:
        case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN:
                return true;
        default:
index 67e2e94..2cccb31 100644 (file)
@@ -34,30 +34,32 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0000, 0x0000 },
        { 0x0004, 0xa000 },
        { 0x0006, 0x0003 },
-       { 0x000a, 0x0802 },
-       { 0x000c, 0x0020 },
+       { 0x000a, 0x081e },
+       { 0x000c, 0x0006 },
        { 0x000e, 0x0000 },
        { 0x0010, 0x0000 },
        { 0x0012, 0x0000 },
+       { 0x0014, 0x0000 },
+       { 0x0016, 0x0000 },
+       { 0x0018, 0x0000 },
        { 0x0020, 0x8000 },
-       { 0x0022, 0x471b },
-       { 0x006a, 0x0000 },
-       { 0x006c, 0x4020 },
+       { 0x0022, 0x8043 },
        { 0x0076, 0x0000 },
        { 0x0078, 0x0000 },
-       { 0x007a, 0x0000 },
+       { 0x007a, 0x0002 },
        { 0x007c, 0x10ec },
        { 0x007d, 0x1015 },
        { 0x00f0, 0x5000 },
-       { 0x00f2, 0x0774 },
-       { 0x00f3, 0x8400 },
+       { 0x00f2, 0x004c },
+       { 0x00f3, 0xecfe },
        { 0x00f4, 0x0000 },
+       { 0x00f6, 0x0400 },
        { 0x0100, 0x0028 },
        { 0x0102, 0xff02 },
-       { 0x0104, 0x8232 },
+       { 0x0104, 0xa213 },
        { 0x0106, 0x200c },
-       { 0x010c, 0x002f },
-       { 0x010e, 0xc000 },
+       { 0x010c, 0x0000 },
+       { 0x010e, 0x0058 },
        { 0x0111, 0x0200 },
        { 0x0112, 0x0400 },
        { 0x0114, 0x0022 },
@@ -65,38 +67,46 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0118, 0x0000 },
        { 0x011a, 0x0123 },
        { 0x011c, 0x4567 },
-       { 0x0300, 0xdddd },
-       { 0x0302, 0x0000 },
-       { 0x0311, 0x9330 },
-       { 0x0313, 0x0000 },
-       { 0x0314, 0x0000 },
+       { 0x0300, 0x203d },
+       { 0x0302, 0x001e },
+       { 0x0311, 0x0000 },
+       { 0x0313, 0x6014 },
+       { 0x0314, 0x00a2 },
        { 0x031a, 0x00a0 },
        { 0x031c, 0x001f },
        { 0x031d, 0xffff },
        { 0x031e, 0x0000 },
        { 0x031f, 0x0000 },
+       { 0x0320, 0x0000 },
        { 0x0321, 0x0000 },
-       { 0x0322, 0x0000 },
-       { 0x0328, 0x0000 },
-       { 0x0329, 0x0000 },
-       { 0x032a, 0x0000 },
-       { 0x032b, 0x0000 },
-       { 0x032c, 0x0000 },
-       { 0x032d, 0x0000 },
-       { 0x032e, 0x030e },
-       { 0x0330, 0x0080 },
+       { 0x0322, 0xd7df },
+       { 0x0328, 0x10b2 },
+       { 0x0329, 0x0175 },
+       { 0x032a, 0x36ad },
+       { 0x032b, 0x7e55 },
+       { 0x032c, 0x0520 },
+       { 0x032d, 0xaa00 },
+       { 0x032e, 0x570e },
+       { 0x0330, 0xe180 },
        { 0x0332, 0x0034 },
-       { 0x0334, 0x0000 },
-       { 0x0336, 0x0000 },
+       { 0x0334, 0x0001 },
+       { 0x0336, 0x0010 },
+       { 0x0338, 0x0000 },
+       { 0x04fa, 0x0030 },
+       { 0x04fc, 0x35c8 },
+       { 0x04fe, 0x0800 },
+       { 0x0500, 0x0400 },
+       { 0x0502, 0x1000 },
+       { 0x0504, 0x0000 },
        { 0x0506, 0x04ff },
-       { 0x0508, 0x0030 },
-       { 0x050a, 0x0018 },
-       { 0x0519, 0x307f },
-       { 0x051a, 0xffff },
-       { 0x051b, 0x4000 },
+       { 0x0508, 0x0010 },
+       { 0x050a, 0x001a },
+       { 0x0519, 0x1c68 },
+       { 0x051a, 0x0ccc },
+       { 0x051b, 0x0666 },
        { 0x051d, 0x0000 },
        { 0x051f, 0x0000 },
-       { 0x0536, 0x1000 },
+       { 0x0536, 0x061c },
        { 0x0538, 0x0000 },
        { 0x053a, 0x0000 },
        { 0x053c, 0x0000 },
@@ -110,19 +120,18 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0544, 0x0000 },
        { 0x0568, 0x0000 },
        { 0x056a, 0x0000 },
-       { 0x1000, 0x0000 },
-       { 0x1002, 0x6505 },
+       { 0x1000, 0x0040 },
+       { 0x1002, 0x5405 },
        { 0x1006, 0x5515 },
-       { 0x1007, 0x003f },
-       { 0x1009, 0x770f },
-       { 0x100a, 0x01ff },
-       { 0x100c, 0x0000 },
+       { 0x1007, 0x05f7 },
+       { 0x1009, 0x0b0a },
+       { 0x100a, 0x00ef },
        { 0x100d, 0x0003 },
        { 0x1010, 0xa433 },
        { 0x1020, 0x0000 },
-       { 0x1200, 0x3d02 },
-       { 0x1202, 0x0813 },
-       { 0x1204, 0x0211 },
+       { 0x1200, 0x5a01 },
+       { 0x1202, 0x6524 },
+       { 0x1204, 0x1f00 },
        { 0x1206, 0x0000 },
        { 0x1208, 0x0000 },
        { 0x120a, 0x0000 },
@@ -130,16 +139,16 @@ static const struct reg_default rt1015_reg[] = {
        { 0x120e, 0x0000 },
        { 0x1210, 0x0000 },
        { 0x1212, 0x0000 },
-       { 0x1300, 0x0701 },
-       { 0x1302, 0x12f9 },
-       { 0x1304, 0x3405 },
+       { 0x1300, 0x10a1 },
+       { 0x1302, 0x12ff },
+       { 0x1304, 0x0400 },
        { 0x1305, 0x0844 },
-       { 0x1306, 0x1611 },
+       { 0x1306, 0x4611 },
        { 0x1308, 0x555e },
        { 0x130a, 0x0000 },
-       { 0x130c, 0x2400},
-       { 0x130e, 0x7700 },
-       { 0x130f, 0x0000 },
+       { 0x130c, 0x2000 },
+       { 0x130e, 0x0100 },
+       { 0x130f, 0x0001 },
        { 0x1310, 0x0000 },
        { 0x1312, 0x0000 },
        { 0x1314, 0x0000 },
@@ -209,6 +218,9 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
        case RT1015_DC_CALIB_CLSD7:
        case RT1015_DC_CALIB_CLSD8:
        case RT1015_S_BST_TIMING_INTER1:
+       case RT1015_OSCK_STA:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL5:
                return true;
 
        default:
@@ -224,6 +236,12 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_CLK3:
        case RT1015_PLL1:
        case RT1015_PLL2:
+       case RT1015_DUM_RW1:
+       case RT1015_DUM_RW2:
+       case RT1015_DUM_RW3:
+       case RT1015_DUM_RW4:
+       case RT1015_DUM_RW5:
+       case RT1015_DUM_RW6:
        case RT1015_CLK_DET:
        case RT1015_SIL_DET:
        case RT1015_CUSTOMER_ID:
@@ -235,6 +253,7 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_PAD_DRV2:
        case RT1015_GAT_BOOST:
        case RT1015_PRO_ALT:
+       case RT1015_OSCK_STA:
        case RT1015_MAN_I2C:
        case RT1015_DAC1:
        case RT1015_DAC2:
@@ -272,6 +291,13 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_SMART_BST_CTRL2:
        case RT1015_ANA_CTRL1:
        case RT1015_ANA_CTRL2:
+       case RT1015_PWR_STATE_CTRL:
+       case RT1015_MONO_DYNA_CTRL:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL2:
+       case RT1015_MONO_DYNA_CTRL3:
+       case RT1015_MONO_DYNA_CTRL4:
+       case RT1015_MONO_DYNA_CTRL5:
        case RT1015_SPK_VOL:
        case RT1015_SHORT_DETTOP1:
        case RT1015_SHORT_DETTOP2:
index 6fbe802..8169962 100644 (file)
 #define RT1015_CLK3                            0x0006
 #define RT1015_PLL1                            0x000a
 #define RT1015_PLL2                            0x000c
+#define RT1015_DUM_RW1                         0x000e
+#define RT1015_DUM_RW2                         0x0010
+#define RT1015_DUM_RW3                         0x0012
+#define RT1015_DUM_RW4                         0x0014
+#define RT1015_DUM_RW5                         0x0016
+#define RT1015_DUM_RW6                         0x0018
 #define RT1015_CLK_DET                         0x0020
 #define RT1015_SIL_DET                         0x0022
 #define RT1015_CUSTOMER_ID                     0x0076
@@ -32,6 +38,7 @@
 #define RT1015_PAD_DRV2                                0x00f2
 #define RT1015_GAT_BOOST                       0x00f3
 #define RT1015_PRO_ALT                         0x00f4
+#define RT1015_OSCK_STA                                0x00f6
 #define RT1015_MAN_I2C                         0x0100
 #define RT1015_DAC1                            0x0102
 #define RT1015_DAC2                            0x0104
 #define RT1015_ANA_CTRL1                       0x0334
 #define RT1015_ANA_CTRL2                       0x0336
 #define RT1015_PWR_STATE_CTRL                  0x0338
-#define RT1015_SPK_VOL                         0x0506
+#define RT1015_MONO_DYNA_CTRL                  0x04fa
+#define RT1015_MONO_DYNA_CTRL1                 0x04fc
+#define RT1015_MONO_DYNA_CTRL2                 0x04fe
+#define RT1015_MONO_DYNA_CTRL3                 0x0500
+#define RT1015_MONO_DYNA_CTRL4                 0x0502
+#define RT1015_MONO_DYNA_CTRL5                 0x0504
+#define RT1015_SPK_VOL                                 0x0506
 #define RT1015_SHORT_DETTOP1                   0x0508
 #define RT1015_SHORT_DETTOP2                   0x050a
 #define RT1015_SPK_DC_DETECT1                  0x0519
index d324512..7d6670a 100644 (file)
@@ -932,7 +932,9 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
                        RT5682_PWR_CBJ, RT5682_PWR_CBJ);
-
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -956,6 +958,11 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        rt5682->jack_type = SND_JACK_HEADPHONE;
                        break;
                }
+
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK,
+                       RT5682_OSW_L_EN | RT5682_OSW_R_EN);
        } else {
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -2829,12 +2836,13 @@ static int rt5682_probe(struct snd_soc_component *component)
                                return ret;
                        }
                        rt5682->mclk = NULL;
-               } else {
-                       /* Register CCF DAI clock control */
-                       ret = rt5682_register_dai_clks(component);
-                       if (ret)
-                               return ret;
                }
+
+               /* Register CCF DAI clock control */
+               ret = rt5682_register_dai_clks(component);
+               if (ret)
+                       return ret;
+
                /* Initial setup for CCF */
                rt5682->lrck[RT5682_AIF1] = CLK_48;
 #endif
index 77665b1..7e1c13c 100644 (file)
@@ -32,6 +32,7 @@ enum asrc_pair_index {
  * @dma_chan: input and output DMA channels
  * @dma_data: private dma data
  * @pos: hardware pointer position
+ * @req_dma_chan: flag to release dev_to_dev chan
  * @private: pair private area
  */
 struct fsl_asrc_pair {
@@ -45,6 +46,7 @@ struct fsl_asrc_pair {
        struct dma_chan *dma_chan[2];
        struct imx_dma_data dma_data;
        unsigned int pos;
+       bool req_dma_chan;
 
        void *private;
 };
index d6a3fc5..5f01a58 100644 (file)
@@ -135,6 +135,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       struct dma_chan *tmp_chan = NULL, *be_chan = NULL;
+       struct snd_soc_component *component_be = NULL;
        struct fsl_asrc *asrc = pair->asrc;
        struct dma_slave_config config_fe, config_be;
        enum asrc_pair_index index = pair->index;
@@ -142,7 +144,6 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        int stream = substream->stream;
        struct imx_dma_data *tmp_data;
        struct snd_soc_dpcm *dpcm;
-       struct dma_chan *tmp_chan;
        struct device *dev_be;
        u8 dir = tx ? OUT : IN;
        dma_cap_mask_t mask;
@@ -198,17 +199,29 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        dma_cap_set(DMA_CYCLIC, mask);
 
        /*
+        * The Back-End device might have already requested a DMA channel,
+        * so try to reuse it first, and then request a new one upon NULL.
+        */
+       component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME);
+       if (component_be) {
+               be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+               tmp_chan = be_chan;
+       }
+       if (!tmp_chan)
+               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+
+       /*
         * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
         * peripheral, unlike SDMA channel that is allocated dynamically. So no
-        * need to configure dma_request and dma_request2, but get dma_chan via
-        * dma_request_slave_channel directly with dma name of Front-End device
+        * need to configure dma_request and dma_request2, but get dma_chan of
+        * Back-End device directly via dma_request_slave_channel.
         */
        if (!asrc->use_edma) {
                /* Get DMA request of Back-End */
-               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
                tmp_data = tmp_chan->private;
                pair->dma_data.dma_request = tmp_data->dma_request;
-               dma_release_channel(tmp_chan);
+               if (!be_chan)
+                       dma_release_channel(tmp_chan);
 
                /* Get DMA request of Front-End */
                tmp_chan = asrc->get_dma_channel(pair, dir);
@@ -220,9 +233,11 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 
                pair->dma_chan[dir] =
                        dma_request_channel(mask, filter, &pair->dma_data);
+               pair->req_dma_chan = true;
        } else {
-               pair->dma_chan[dir] =
-                       asrc->get_dma_channel(pair, dir);
+               pair->dma_chan[dir] = tmp_chan;
+               /* Do not flag to release if we are reusing the Back-End one */
+               pair->req_dma_chan = !be_chan;
        }
 
        if (!pair->dma_chan[dir]) {
@@ -261,7 +276,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
        if (ret) {
                dev_err(dev, "failed to config DMA channel for Back-End\n");
-               dma_release_channel(pair->dma_chan[dir]);
+               if (pair->req_dma_chan)
+                       dma_release_channel(pair->dma_chan[dir]);
                return ret;
        }
 
@@ -273,19 +289,22 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       u8 dir = tx ? OUT : IN;
 
        snd_pcm_set_runtime_buffer(substream, NULL);
 
-       if (pair->dma_chan[IN])
-               dma_release_channel(pair->dma_chan[IN]);
+       if (pair->dma_chan[!dir])
+               dma_release_channel(pair->dma_chan[!dir]);
 
-       if (pair->dma_chan[OUT])
-               dma_release_channel(pair->dma_chan[OUT]);
+       /* release dev_to_dev chan if we aren't reusing the Back-End one */
+       if (pair->dma_chan[dir] && pair->req_dma_chan)
+               dma_release_channel(pair->dma_chan[dir]);
 
-       pair->dma_chan[IN] = NULL;
-       pair->dma_chan[OUT] = NULL;
+       pair->dma_chan[!dir] = NULL;
+       pair->dma_chan[dir] = NULL;
 
        return 0;
 }
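
Note: the hw_params/hw_free pair above boils down to an ownership flag — req_dma_chan records whether the channel was requested here or merely borrowed from the Back-End, and only an owned channel is released. A minimal user-space sketch of that pattern (a hypothetical resource stands in for the DMA channel; this is not the driver's code):

    #include <stdbool.h>
    #include <stdlib.h>

    struct pair {
            void *chan;      /* stand-in for the DMA channel */
            bool req_chan;   /* set only when we requested it ourselves */
    };

    /* reuse an existing channel when one is offered, otherwise request our own */
    static void acquire(struct pair *p, void *existing)
    {
            if (existing) {
                    p->chan = existing;
                    p->req_chan = false;
            } else {
                    p->chan = malloc(64);
                    p->req_chan = true;
            }
    }

    /* release only what we requested; a borrowed channel is left alone */
    static void release(struct pair *p)
    {
            if (p->chan && p->req_chan)
                    free(p->chan);
            p->chan = NULL;
    }

    int main(void)
    {
            struct pair p;

            acquire(&p, NULL);      /* nothing to reuse, so we own the channel */
            release(&p);            /* freed, because req_chan was set */
            return 0;
    }
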
index 0c813a4..69aeb0e 100644 (file)
@@ -265,12 +265,20 @@ static int fsl_mqs_remove(struct platform_device *pdev)
 static int fsl_mqs_runtime_resume(struct device *dev)
 {
        struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+       int ret;
 
-       if (mqs_priv->ipg)
-               clk_prepare_enable(mqs_priv->ipg);
+       ret = clk_prepare_enable(mqs_priv->ipg);
+       if (ret) {
+               dev_err(dev, "failed to enable ipg clock\n");
+               return ret;
+       }
 
-       if (mqs_priv->mclk)
-               clk_prepare_enable(mqs_priv->mclk);
+       ret = clk_prepare_enable(mqs_priv->mclk);
+       if (ret) {
+               dev_err(dev, "failed to enable mclk clock\n");
+               clk_disable_unprepare(mqs_priv->ipg);
+               return ret;
+       }
 
        if (mqs_priv->use_gpr)
                regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
@@ -292,11 +300,8 @@ static int fsl_mqs_runtime_suspend(struct device *dev)
                regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
                            &mqs_priv->reg_mqs_ctrl);
 
-       if (mqs_priv->mclk)
-               clk_disable_unprepare(mqs_priv->mclk);
-
-       if (mqs_priv->ipg)
-               clk_disable_unprepare(mqs_priv->ipg);
+       clk_disable_unprepare(mqs_priv->mclk);
+       clk_disable_unprepare(mqs_priv->ipg);
 
        return 0;
 }
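
Note: the resume path now propagates clock errors and unwinds the first clock when the second one fails, instead of silently ignoring failures. The shape of that pattern, sketched with hypothetical enable/disable helpers rather than the real clk API:

    #include <stdio.h>

    /* hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare() */
    static int enable_clk(const char *name) { printf("enable %s\n", name); return 0; }
    static void disable_clk(const char *name) { printf("disable %s\n", name); }

    static int runtime_resume(void)
    {
            int ret;

            ret = enable_clk("ipg");
            if (ret)
                    return ret;

            ret = enable_clk("mclk");
            if (ret) {
                    disable_clk("ipg");     /* unwind the clock that did succeed */
                    return ret;
            }

            return 0;
    }

    int main(void) { return runtime_resume(); }
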
index bad89b0..1a2fa7f 100644 (file)
@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        struct regmap *regs = ssi->regs;
        u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
        unsigned long clkrate, baudrate, tmprate;
-       unsigned int slots = params_channels(hw_params);
-       unsigned int slot_width = 32;
+       unsigned int channels = params_channels(hw_params);
+       unsigned int slot_width = params_width(hw_params);
+       unsigned int slots = 2;
        u64 sub, savesub = 100000;
        unsigned int freq;
        bool baudclk_is_used;
@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        /* Override slots and slot_width if being specifically set... */
        if (ssi->slots)
                slots = ssi->slots;
-       /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
-       if (ssi->slot_width && slots != 2)
+       if (ssi->slot_width)
                slot_width = ssi->slot_width;
 
+       /* ...but force 32 bits for stereo audio using I2S Master Mode */
+       if (channels == 2 &&
+           (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+               slot_width = 32;
+
        /* Generate bit clock based on the slot number and slot width */
        freq = slots * slot_width * params_rate(hw_params);
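
Note: as a quick sanity check of the formula above for the forced stereo I2S master case (slot_width pinned to 32 bits), assuming a 48 kHz stream:

    #include <stdio.h>

    int main(void)
    {
            unsigned int slots = 2, slot_width = 32, rate = 48000;

            /* freq = slots * slot_width * rate, as in fsl_ssi_set_bclk() */
            printf("bit clock = %u Hz\n", slots * slot_width * rate); /* 3072000 */
            return 0;
    }
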
 
index a2a5798..5dc489a 100644 (file)
@@ -492,7 +492,7 @@ config SND_SOC_INTEL_SOF_PCM512x_MACH
 
 endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
 
-if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
+if (SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK)
 
 config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
        tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
@@ -520,7 +520,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
          Say Y if you have such a device.
          If unsure select "N".
 
-endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
+endif ## SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK
 
 if SND_SOC_SOF_JASPERLAKE
 
index 6c20bdd..8ada4ec 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/module.h>
 #include "common.h"
+#include "qdsp6/q6afe.h"
 
 int qcom_snd_parse_of(struct snd_soc_card *card)
 {
@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        }
                        link->no_pcm = 1;
                        link->ignore_pmdown_time = 1;
+
+                       if (q6afe_is_rx_port(link->id)) {
+                               link->dpcm_playback = 1;
+                               link->dpcm_capture = 0;
+                       } else {
+                               link->dpcm_playback = 0;
+                               link->dpcm_capture = 1;
+                       }
+
                } else {
                        dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
                        if (!dlc)
@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        link->codecs->dai_name = "snd-soc-dummy-dai";
                        link->codecs->name = "snd-soc-dummy";
                        link->dynamic = 1;
+                       link->dpcm_playback = 1;
+                       link->dpcm_capture = 1;
                }
 
                link->ignore_suspend = 1;
                link->nonatomic = 1;
-               link->dpcm_playback = 1;
-               link->dpcm_capture = 1;
                link->stream_name = link->name;
                link++;
 
index e0945f7..0ce4eb6 100644 (file)
@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
 }
 EXPORT_SYMBOL_GPL(q6afe_get_port_id);
 
+int q6afe_is_rx_port(int index)
+{
+       if (index < 0 || index >= AFE_PORT_MAX)
+               return -EINVAL;
+
+       return port_maps[index].is_rx;
+}
+EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
 static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
                            struct q6afe_port *port)
 {
index c7ed542..1a0f80a 100644 (file)
@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
 int q6afe_port_stop(struct q6afe_port *port);
 void q6afe_port_put(struct q6afe_port *port);
 int q6afe_get_port_id(int index);
+int q6afe_is_rx_port(int index);
 void q6afe_hdmi_port_prepare(struct q6afe_port *port,
                            struct q6afe_hdmi_cfg *cfg);
 void q6afe_slim_port_prepare(struct q6afe_port *port,
index 0e0e8f7..ae4b2ca 100644 (file)
@@ -25,6 +25,7 @@
 #define ASM_STREAM_CMD_FLUSH                   0x00010BCE
 #define ASM_SESSION_CMD_PAUSE                  0x00010BD3
 #define ASM_DATA_CMD_EOS                       0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS            0x00010C1C
 #define ASM_NULL_POPP_TOPOLOGY                 0x00010C68
 #define ASM_STREAM_CMD_FLUSH_READBUFS          0x00010C09
 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM                0x00010C10
@@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                case ASM_SESSION_CMD_SUSPEND:
                        client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
                        break;
-               case ASM_DATA_CMD_EOS:
-                       client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
-                       break;
                case ASM_STREAM_CMD_FLUSH:
                        client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
                        break;
@@ -728,6 +726,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                }
 
                break;
+       case ASM_DATA_EVENT_RENDERED_EOS:
+               client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+               break;
        }
 
        if (ac->cb)
index 7cd42fc..1707414 100644 (file)
@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put(dev);
                return ret;
+       }
 
        ret = regcache_sync(pdm->regmap);
 
index 7b38720..0f30f5a 100644 (file)
@@ -310,7 +310,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
 }
 EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
 
-static struct snd_soc_component
+struct snd_soc_component
 *snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
 {
        struct snd_soc_component *component;
@@ -329,6 +329,7 @@ static struct snd_soc_component
 
        return found_component;
 }
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked);
 
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name)
index a9ea172..11e5d79 100644 (file)
@@ -9,6 +9,43 @@
 #include <sound/soc.h>
 #include <sound/dmaengine_pcm.h>
 
+static void devm_dai_release(struct device *dev, void *res)
+{
+       snd_soc_unregister_dai(*(struct snd_soc_dai **)res);
+}
+
+/**
+ * devm_snd_soc_register_dai - resource-managed dai registration
+ * @dev: Device used to manage component
+ * @component: The component the DAIs are registered for
+ * @dai_drv: DAI driver to use for the DAI
+ * @legacy_dai_naming: if %true, use legacy single-name format;
+ *     if %false, use multiple-name format;
+ */
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming)
+{
+       struct snd_soc_dai **ptr;
+       struct snd_soc_dai *dai;
+
+       ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming);
+       if (dai) {
+               *ptr = dai;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return dai;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai);
+
 static void devm_component_release(struct device *dev, void *res)
 {
        snd_soc_unregister_component(*(struct device **)res);
index f728309..80a4e71 100644 (file)
  */
 #define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
 
-struct dmaengine_pcm {
-       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
-       const struct snd_dmaengine_pcm_config *config;
-       struct snd_soc_component component;
-       unsigned int flags;
-};
-
-static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
-{
-       return container_of(p, struct dmaengine_pcm, component);
-}
-
 static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
        struct snd_pcm_substream *substream)
 {
index 2c114b4..c517064 100644 (file)
@@ -2630,15 +2630,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
        int count, paths;
        int ret;
 
+       if (!fe->dai_link->dynamic)
+               return 0;
+
        if (fe->num_cpus > 1) {
                dev_err(fe->dev,
                        "%s doesn't support Multi CPU yet\n", __func__);
                return -EINVAL;
        }
 
-       if (!fe->dai_link->dynamic)
-               return 0;
-
        /* only check active links */
        if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
                return 0;
index 9e89633..43e5745 100644 (file)
@@ -1851,7 +1851,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
 
        /* register the DAI to the component */
-       dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+       dai = devm_snd_soc_register_dai(tplg->comp->dev, tplg->comp, dai_drv, false);
        if (!dai)
                return -ENOMEM;
 
@@ -1859,7 +1859,6 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
        if (ret != 0) {
                dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
-               snd_soc_unregister_dai(dai);
                return ret;
        }
 
index c9a2bee..3aaf25e 100644 (file)
@@ -25,8 +25,7 @@ config SND_SOC_SOF_INTEL_PCI
        select SND_SOC_SOF_CANNONLAKE  if SND_SOC_SOF_CANNONLAKE_SUPPORT
        select SND_SOC_SOF_COFFEELAKE  if SND_SOC_SOF_COFFEELAKE_SUPPORT
        select SND_SOC_SOF_ICELAKE     if SND_SOC_SOF_ICELAKE_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_LP if SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
+       select SND_SOC_SOF_COMETLAKE   if SND_SOC_SOF_COMETLAKE_SUPPORT
        select SND_SOC_SOF_TIGERLAKE   if SND_SOC_SOF_TIGERLAKE_SUPPORT
        select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
        select SND_SOC_SOF_JASPERLAKE  if SND_SOC_SOF_JASPERLAKE_SUPPORT
@@ -201,34 +200,22 @@ config SND_SOC_SOF_ICELAKE
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP
+config SND_SOC_SOF_COMETLAKE
        tristate
        select SND_SOC_SOF_HDA_COMMON
        help
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       bool "SOF support for CometLake-LP"
-       help
-         This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-LP processors.
-         Say Y if you have such a device.
-         If unsure select "N".
+config SND_SOC_SOF_COMETLAKE_SUPPORT
+       bool
 
-config SND_SOC_SOF_COMETLAKE_H
-       tristate
-       select SND_SOC_SOF_HDA_COMMON
-       help
-         This option is not user-selectable but automagically handled by
-         'select' statements at a higher level
-
-config SND_SOC_SOF_COMETLAKE_H_SUPPORT
-       bool "SOF support for CometLake-H"
+config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
+       bool "SOF support for CometLake"
+       select SND_SOC_SOF_COMETLAKE_SUPPORT
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-H processors.
-         Say Y if you have such a device.
+         using the Cometlake processors.
          If unsure select "N".
 
 config SND_SOC_SOF_TIGERLAKE_SUPPORT
index 7f65dcc..1bda14c 100644 (file)
@@ -653,11 +653,16 @@ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
                if (status & AZX_INT_CTRL_EN) {
                        rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
                        if (rirb_status & RIRB_INT_MASK) {
+                               /*
+                                * Clearing the interrupt status here ensures
+                                * that no interrupt gets masked after the RIRB
+                                * wp is read in snd_hdac_bus_update_rirb.
+                                */
+                               snd_hdac_chip_writeb(bus, RIRBSTS,
+                                                    RIRB_INT_MASK);
                                active = true;
                                if (rirb_status & RIRB_INT_RESPONSE)
                                        snd_hdac_bus_update_rirb(bus);
-                               snd_hdac_chip_writeb(bus, RIRBSTS,
-                                                    RIRB_INT_MASK);
                        }
                }
 #endif
index b13697d..aa3532b 100644 (file)
@@ -151,9 +151,7 @@ static const struct sof_dev_desc cfl_desc = {
 };
 #endif
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) || \
-       IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 static const struct sof_dev_desc cml_desc = {
        .machines               = snd_soc_acpi_intel_cml_machines,
        .alt_machines           = snd_soc_acpi_intel_cml_sdw_machines,
@@ -411,8 +409,11 @@ static const struct pci_device_id sof_pci_ids[] = {
                .driver_data = (unsigned long)&cfl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
-       { PCI_DEVICE(0x8086, 0x34C8),
+       { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+               .driver_data = (unsigned long)&icl_desc},
+       { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
                .driver_data = (unsigned long)&icl_desc},
+
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
        { PCI_DEVICE(0x8086, 0x38c8),
@@ -420,17 +421,20 @@ static const struct pci_device_id sof_pci_ids[] = {
        { PCI_DEVICE(0x8086, 0x4dc8),
                .driver_data = (unsigned long)&jsl_desc},
 #endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
-       { PCI_DEVICE(0x8086, 0x02c8),
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+       { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
                .driver_data = (unsigned long)&cml_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-       { PCI_DEVICE(0x8086, 0x06c8),
+       { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+               .driver_data = (unsigned long)&cml_desc},
+       { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
                .driver_data = (unsigned long)&cml_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
-       { PCI_DEVICE(0x8086, 0xa0c8),
+       { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+               .driver_data = (unsigned long)&tgl_desc},
+       { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
                .driver_data = (unsigned long)&tgl_desc},
+
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
        { PCI_DEVICE(0x8086, 0x4b55),
index d6219fb..de43267 100644 (file)
@@ -84,10 +84,10 @@ struct snd_usb_endpoint {
        dma_addr_t sync_dma;            /* DMA address of syncbuf */
 
        unsigned int pipe;              /* the data i/o pipe */
-       unsigned int framesize[2];      /* small/large frame sizes in samples */
-       unsigned int sample_rem;        /* remainder from division fs/fps */
+       unsigned int packsize[2];       /* small/large packet sizes in samples */
+       unsigned int sample_rem;        /* remainder from division fs/pps */
        unsigned int sample_accum;      /* sample accumulator */
-       unsigned int fps;               /* frames per second */
+       unsigned int pps;               /* packets per second */
        unsigned int freqn;             /* nominal sampling rate in fs/fps in Q16.16 format */
        unsigned int freqm;             /* momentary sampling rate in fs/fps in Q16.16 format */
        int        freqshift;           /* how much to shift the feedback value to get Q16.16 */
index 9bea7d3..8876026 100644 (file)
@@ -159,11 +159,11 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
                return ep->maxframesize;
 
        ep->sample_accum += ep->sample_rem;
-       if (ep->sample_accum >= ep->fps) {
-               ep->sample_accum -= ep->fps;
-               ret = ep->framesize[1];
+       if (ep->sample_accum >= ep->pps) {
+               ep->sample_accum -= ep->pps;
+               ret = ep->packsize[1];
        } else {
-               ret = ep->framesize[0];
+               ret = ep->packsize[0];
        }
 
        return ret;
@@ -1088,15 +1088,15 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 
        if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
                ep->freqn = get_usb_full_speed_rate(rate);
-               ep->fps = 1000;
+               ep->pps = 1000 >> ep->datainterval;
        } else {
                ep->freqn = get_usb_high_speed_rate(rate);
-               ep->fps = 8000;
+               ep->pps = 8000 >> ep->datainterval;
        }
 
-       ep->sample_rem = rate % ep->fps;
-       ep->framesize[0] = rate / ep->fps;
-       ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
+       ep->sample_rem = rate % ep->pps;
+       ep->packsize[0] = rate / ep->pps;
+       ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps;
 
        /* calculate the frequency in 16.16 format */
        ep->freqm = ep->freqn;
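
Note: the renamed fields implement a plain remainder accumulator — every packet carries either rate/pps samples or one more, so that exactly rate samples go out each second. A standalone check of that arithmetic, assuming a high-speed endpoint (8000 packets per second, datainterval 0) and a 44.1 kHz stream:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rate = 44100, pps = 8000;
            unsigned int rem = rate % pps;                 /* 4100 */
            unsigned int small = rate / pps;               /* 5 samples per packet */
            unsigned int large = (rate + pps - 1) / pps;   /* 6 samples per packet */
            unsigned int accum = 0, total = 0;

            for (unsigned int i = 0; i < pps; i++) {
                    accum += rem;
                    if (accum >= pps) {     /* same test as next_packet_size() */
                            accum -= pps;
                            total += large;
                    } else {
                            total += small;
                    }
            }

            printf("%u samples in one second (expected %u)\n", total, rate);
            return 0;
    }
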
index 5ffb457..1b28d01 100644 (file)
@@ -394,8 +394,9 @@ skip_rate:
        return nr_rates;
 }
 
-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
- * call. Return a static table of known clock rates.
+/* Line6 Helix series and the Rode Rodecaster Pro don't support the
+ * UAC2_CS_RANGE usb function call. Return a static table of known
+ * clock rates.
  */
 static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
                                                struct audioformat *fp)
@@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
        case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
        case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
        case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+       case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
                return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
        }
 
index 663d608..970c9bd 100644 (file)
@@ -286,6 +286,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
                urb->interval = LINE6_ISO_INTERVAL;
                urb->error_count = 0;
                urb->complete = audio_in_callback;
+               if (usb_urb_ep_type_check(urb))
+                       return -EINVAL;
        }
 
        return 0;
index 7629116..2746d96 100644 (file)
@@ -840,7 +840,7 @@ void line6_disconnect(struct usb_interface *interface)
        if (WARN_ON(usbdev != line6->usbdev))
                return;
 
-       cancel_delayed_work(&line6->startup_work);
+       cancel_delayed_work_sync(&line6->startup_work);
 
        if (line6->urb_listen != NULL)
                line6_stop_listen(line6);
index 01930ce..8233c61 100644 (file)
@@ -431,6 +431,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm)
                urb->interval = LINE6_ISO_INTERVAL;
                urb->error_count = 0;
                urb->complete = audio_out_callback;
+               if (usb_urb_ep_type_check(urb))
+                       return -EINVAL;
        }
 
        return 0;
index 047b905..354f576 100644 (file)
@@ -1499,6 +1499,8 @@ void snd_usbmidi_disconnect(struct list_head *p)
        spin_unlock_irq(&umidi->disc_lock);
        up_write(&umidi->disc_rwsem);
 
+       del_timer_sync(&umidi->error_timer);
+
        for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
                struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
                if (ep->out)
@@ -1525,7 +1527,6 @@ void snd_usbmidi_disconnect(struct list_head *p)
                        ep->in = NULL;
                }
        }
-       del_timer_sync(&umidi->error_timer);
 }
 EXPORT_SYMBOL(snd_usbmidi_disconnect);
 
@@ -2301,16 +2302,22 @@ void snd_usbmidi_input_stop(struct list_head *p)
 }
 EXPORT_SYMBOL(snd_usbmidi_input_stop);
 
-static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep)
+static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi,
+                                      struct snd_usb_midi_in_endpoint *ep)
 {
        unsigned int i;
+       unsigned long flags;
 
        if (!ep)
                return;
        for (i = 0; i < INPUT_URBS; ++i) {
                struct urb *urb = ep->urbs[i];
-               urb->dev = ep->umidi->dev;
-               snd_usbmidi_submit_urb(urb, GFP_KERNEL);
+               spin_lock_irqsave(&umidi->disc_lock, flags);
+               if (!atomic_read(&urb->use_count)) {
+                       urb->dev = ep->umidi->dev;
+                       snd_usbmidi_submit_urb(urb, GFP_ATOMIC);
+               }
+               spin_unlock_irqrestore(&umidi->disc_lock, flags);
        }
 }
 
@@ -2326,7 +2333,7 @@ void snd_usbmidi_input_start(struct list_head *p)
        if (umidi->input_running || !umidi->opened[1])
                return;
        for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
-               snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+               snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in);
        umidi->input_running = 1;
 }
 EXPORT_SYMBOL(snd_usbmidi_input_start);
index 15769f2..eab0fd4 100644 (file)
@@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
  * if failed, give up and free the control instance.
  */
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info)
 {
        struct usb_mixer_interface *mixer = list->mixer;
        int err;
@@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
                return err;
        }
        list->kctl = kctl;
+       list->is_std_info = is_std_info;
        list->next_id_elem = mixer->id_elems[list->id];
        mixer->id_elems[list->id] = list;
        return 0;
@@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
        unitid = delegate_notify(mixer, unitid, NULL, NULL);
 
        for_each_mixer_elem(list, mixer, unitid) {
-               struct usb_mixer_elem_info *info =
-                       mixer_elem_list_to_info(list);
+               struct usb_mixer_elem_info *info;
+
+               if (!list->is_std_info)
+                       continue;
+               info = mixer_elem_list_to_info(list);
                /* invalidate cache, so the value is read from the device */
                info->cached = 0;
                snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
@@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
 
                if (!list->kctl)
                        continue;
+               if (!list->is_std_info)
+                       continue;
 
                info = mixer_elem_list_to_info(list);
                if (count > 1 && info->control != control)
index 41ec9dc..c29e27a 100644 (file)
@@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
        struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
        struct snd_kcontrol *kctl;
        unsigned int id;
+       bool is_std_info;
        usb_mixer_elem_dump_func_t dump;
        usb_mixer_elem_resume_func_t resume;
 };
@@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
 int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
                                int request, int validx, int value_set);
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+       snd_usb_mixer_add_list(list, kctl, true)
 
 void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
                                 struct usb_mixer_interface *mixer,
index b6bcf2f..cec1cfd 100644 (file)
@@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
        kctl->private_free = snd_usb_mixer_elem_free;
-       return snd_usb_mixer_add_control(list, kctl);
+       /* don't use snd_usb_mixer_add_control() here, this is a special list element */
+       return snd_usb_mixer_add_list(list, kctl, false);
 }
 
 /*
index 8a05dcb..40b7cd1 100644 (file)
@@ -367,6 +367,8 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
        case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+       case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+       case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
@@ -1786,6 +1788,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
                return 0;
        case SNDRV_PCM_TRIGGER_STOP:
                stop_endpoints(subs);
+               subs->data_endpoint->retire_data_urb = NULL;
                subs->running = 0;
                return 0;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
index 4ec4910..9092cc0 100644 (file)
@@ -3633,4 +3633,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
        }
 },
 
+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have swapped L-R channels, but that's for userspace to deal
+ * with.
+ */
+{
+       USB_DEVICE(0x534d, 0x2109),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "MacroSilicon",
+               .product_name = "MS2109",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 3,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                               USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index bca0179..fca7273 100644 (file)
@@ -1532,6 +1532,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 static bool is_itf_usb_dsd_dac(unsigned int id)
 {
        switch (id) {
+       case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
        case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1673,6 +1674,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
             chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                usleep_range(1000, 2000);
+
+       /*
+        * Samsung USBC Headset (AKG) need a tiny delay after each
+        * class compliant request. (Model number: AAM625R or AAM627R)
+        */
+       if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               usleep_range(5000, 6000);
 }
 
 /*
@@ -1856,6 +1865,7 @@ struct registration_quirk {
 static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),     /* Kingston HyperX AMP */
        REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),     /* Kingston HyperX Cloud Alpha S */
+       REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        { 0 }                                   /* terminator */
 };
 
index 17c5a03..0780f97 100644 (file)
@@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
 };
 
 struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
 };
 
 struct kvm_svm_nested_state_data {
index df767af..45f8e1b 100644 (file)
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START(memcpy_orig)
        retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
index 3110164..70c78fa 100644 (file)
@@ -49,7 +49,7 @@ MAP COMMANDS
 |              | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
 |              | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
 |              | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-|              | **queue** | **stack** | **sk_storage** | **struct_ops** }
+|              | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** }
 
 DESCRIPTION
 ===========
index c5fac80..1d3b606 100644 (file)
@@ -49,6 +49,7 @@ const char * const map_type_name[] = {
        [BPF_MAP_TYPE_STACK]                    = "stack",
        [BPF_MAP_TYPE_SK_STORAGE]               = "sk_storage",
        [BPF_MAP_TYPE_STRUCT_OPS]               = "struct_ops",
+       [BPF_MAP_TYPE_RINGBUF]                  = "ringbuf",
 };
 
 const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -1590,7 +1591,7 @@ static int do_help(int argc, char **argv)
                "                 lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
                "                 devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
                "                 cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
-               "                 queue | stack | sk_storage | struct_ops }\n"
+               "                 queue | stack | sk_storage | struct_ops | ringbuf }\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
                bin_name, argv[-2]);
index 4671fbf..7f475d5 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
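
Note: the example value quoted in the comment above is easy to verify with a plain-C rewrite of the mask expression (the kernel macro additionally carries the compile-time input check being adjusted here):

    #include <stdio.h>

    /* plain-C equivalent of GENMASK_ULL(h, l), for checking the comment's example */
    #define GENMASK_ULL(h, l) \
            ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
            printf("%#018llx\n", GENMASK_ULL(39, 21)); /* 0x000000ffffe00000 */
            return 0;
    }
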
index 1968481..8bd3305 100644 (file)
@@ -3168,16 +3168,15 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3188,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
@@ -3210,16 +3209,18 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
  *             exactly is queried is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
  * int bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
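
Note: the reserve/submit pair documented above is normally driven from a BPF program. A minimal sketch, assuming libbpf's bpf_helpers.h and a made-up event layout (none of this is taken from the patch itself):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct event { __u32 pid; };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 256 * 1024);
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int handle_execve(void *ctx)
    {
            struct event *e;

            e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
            if (!e)
                    return 0;

            e->pid = bpf_get_current_pid_tgid() >> 32;
            /* BPF_RB_NO_WAKEUP or BPF_RB_FORCE_WAKEUP may be passed here to
             * override the default notification behaviour described above */
            bpf_ringbuf_submit(e, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
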
index 1b6015b..dbef24e 100644 (file)
@@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
                                 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
                                 __u64 *probe_offset, __u64 *probe_addr);
+
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
 LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
 
 #ifdef __cplusplus
index df59fd4..e0af36b 100644 (file)
 #include <stdbool.h>
 #include <stddef.h>
 #include <limits.h>
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
 
 static inline size_t hash_bits(size_t h, int bits)
 {
        /* shuffle bits and return requested number of upper bits */
-       return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+       /* LP64 case */
+       return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+       return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+#      error "Unsupported size_t size"
+#endif
 }
 
 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
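
Note: the new dispatch keeps the same multiplicative (Fibonacci) hashing idea while dropping the __WORDSIZE dependency. A standalone version of the 64-bit branch, assuming an LP64 target:

    #include <stdio.h>
    #include <stdint.h>

    /* multiply by 2^64 / golden ratio, keep only the top 'bits' bits */
    static uint64_t hash_bits64(uint64_t h, int bits)
    {
            return (h * 11400714819323198485llu) >> (64 - bits);
    }

    int main(void)
    {
            for (uint64_t key = 1; key <= 8; key++)
                    printf("key %llu -> bucket %llu of 16\n",
                           (unsigned long long)key,
                           (unsigned long long)hash_bits64(key, 4));
            return 0;
    }
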
index 477c679..11e4725 100644 (file)
@@ -4818,7 +4818,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
                        err = -EINVAL;
                        goto out;
                }
-               prog = bpf_object__find_program_by_title(obj, sec_name);
+               prog = NULL;
+               for (i = 0; i < obj->nr_programs; i++) {
+                       if (!strcmp(obj->programs[i].section_name, sec_name)) {
+                               prog = &obj->programs[i];
+                               break;
+                       }
+               }
                if (!prog) {
                        pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
                                sec_name);
@@ -6653,7 +6659,7 @@ static const struct bpf_sec_def section_defs[] = {
                .expected_attach_type = BPF_TRACE_ITER,
                .is_attach_btf = true,
                .attach_fn = attach_iter),
-       BPF_EAPROG_SEC("xdp_devmap",            BPF_PROG_TYPE_XDP,
+       BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
                                                BPF_XDP_DEVMAP),
        BPF_PROG_SEC("xdp",                     BPF_PROG_TYPE_XDP),
        BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
index dbb9efb..39ebf61 100644 (file)
@@ -237,6 +237,9 @@ static int get_value(struct parse_opt_ctx_t *p,
                return err;
 
        case OPTION_CALLBACK:
+               if (opt->set)
+                       *(bool *)opt->set = true;
+
                if (unset)
                        return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
                if (opt->flags & PARSE_OPT_NOARG)
index 27f3b07..f1640d6 100644 (file)
@@ -361,6 +361,7 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                break;
 
        case KBUFFER_TYPE_TIME_EXTEND:
+       case KBUFFER_TYPE_TIME_STAMP:
                extend = read_4(kbuf, data);
                data += 4;
                extend <<= TS_SHIFT;
@@ -369,10 +370,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                *length = 0;
                break;
 
-       case KBUFFER_TYPE_TIME_STAMP:
-               data += 12;
-               *length = 0;
-               break;
        case 0:
                *length = read_4(kbuf, data) - 4;
                *length = (*length + 3) & ~3;
@@ -397,7 +394,11 @@ static unsigned int update_pointers(struct kbuffer *kbuf)
 
        type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
 
-       kbuf->timestamp += delta;
+       if (type_len == KBUFFER_TYPE_TIME_STAMP)
+               kbuf->timestamp = delta;
+       else
+               kbuf->timestamp += delta;
+
        kbuf->index = calc_index(kbuf, ptr);
        kbuf->next = kbuf->index + length;
 
@@ -454,7 +455,9 @@ static int __next_event(struct kbuffer *kbuf)
                if (kbuf->next >= kbuf->size)
                        return -1;
                type = update_pointers(kbuf);
-       } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+       } while (type == KBUFFER_TYPE_TIME_EXTEND ||
+                type == KBUFFER_TYPE_TIME_STAMP ||
+                type == KBUFFER_TYPE_PADDING);
 
        return 0;
 }
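
Note: the fix distinguishes the two record types — a TIME_EXTEND value is a delta added to the running timestamp, while a TIME_STAMP record carries an absolute time that replaces it. A tiny model of that update rule, with made-up numbers:

    #include <stdio.h>

    enum rec_type { TIME_EXTEND, TIME_STAMP };

    static unsigned long long update_ts(enum rec_type type,
                                        unsigned long long ts,
                                        unsigned long long val)
    {
            /* TIME_STAMP is absolute, TIME_EXTEND is relative */
            return type == TIME_STAMP ? val : ts + val;
    }

    int main(void)
    {
            unsigned long long ts = 1000;

            ts = update_ts(TIME_EXTEND, ts, 50);        /* 1050 */
            ts = update_ts(TIME_STAMP, ts, 900000);     /* 900000, not 901050 */
            printf("%llu\n", ts);
            return 0;
    }
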
@@ -547,6 +550,34 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
 }
 
 /**
+ * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer
+ * @kbuf:      The kbuffer to load
+ * @subbuf:    The subbuffer to read from.
+ *
+ * Return the timestamp from a subbuffer.
+ */
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf)
+{
+       return kbuf->read_8(subbuf);
+}
+
+/**
+ * kbuffer_ptr_delta - read the delta field from a record
+ * @kbuf:      The kbuffer to load
+ * @ptr:       The record in the buffer.
+ *
+ * Return the timestamp delta from a record
+ */
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr)
+{
+       unsigned int type_len_ts;
+
+       type_len_ts = read_4(kbuf, ptr);
+       return ts4host(kbuf, type_len_ts);
+}
+
+
+/**
  * kbuffer_read_event - read the next event in the kbuffer subbuffer
  * @kbuf:      The kbuffer to read from
  * @ts:                The address to store the timestamp of the event (may be NULL to ignore)
index ed4d697..5fa8292 100644 (file)
@@ -49,6 +49,8 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
 void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
 void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
 unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf);
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr);
 
 void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
 
index eda15a5..3c59677 100644 (file)
@@ -84,4 +84,6 @@ unsigned long arch_jump_destination(struct instruction *insn);
 
 unsigned long arch_dest_rela_offset(int addend);
 
+const char *arch_nop_insn(int len);
+
 #endif /* _ARCH_H */
index 4b504fc..9872195 100644 (file)
@@ -565,3 +565,21 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
        state->regs[16].base = CFI_CFA;
        state->regs[16].offset = -8;
 }
+
+const char *arch_nop_insn(int len)
+{
+       static const char nops[5][5] = {
+               /* 1 */ { 0x90 },
+               /* 2 */ { 0x66, 0x90 },
+               /* 3 */ { 0x0f, 0x1f, 0x00 },
+               /* 4 */ { 0x0f, 0x1f, 0x40, 0x00 },
+               /* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
+       };
+
+       if (len < 1 || len > 5) {
+               WARN("invalid NOP size: %d\n", len);
+               return NULL;
+       }
+
+       return nops[len-1];
+}
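
The table above holds the canonical multi-byte x86 NOP encodings for lengths 1-5. A hedged sketch of how such a table can be used to overwrite an instruction in a code buffer (the nop_out() helper and the buffer are made up for illustration; objtool itself patches bytes through elf_write_insn(), added further down):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same encodings as the arch_nop_insn() table above. */
static const unsigned char demo_nops[5][5] = {
	{ 0x90 },
	{ 0x66, 0x90 },
	{ 0x0f, 0x1f, 0x00 },
	{ 0x0f, 0x1f, 0x40, 0x00 },
	{ 0x0f, 0x1f, 0x44, 0x00, 0x00 },
};

/* Hypothetical helper: replace 'len' bytes at 'offset' with one NOP. */
static int nop_out(unsigned char *code, size_t offset, int len)
{
	if (len < 1 || len > 5)
		return -1;
	memcpy(code + offset, demo_nops[len - 1], len);
	return 0;
}

int main(void)
{
	/* call rel32 (5 bytes) followed by ret */
	unsigned char code[] = { 0xe8, 0x00, 0x00, 0x00, 0x00, 0xc3 };
	int i;

	nop_out(code, 0, 5);	/* the call becomes a single 5-byte NOP */
	for (i = 0; i < (int)sizeof(code); i++)
		printf("%02x ", code[i]);
	printf("\n");
	return 0;
}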
diff --git a/tools/objtool/arch/x86/include/arch_elf.h b/tools/objtool/arch/x86/include/arch_elf.h
new file mode 100644 (file)
index 0000000..69cc426
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _OBJTOOL_ARCH_ELF
+#define _OBJTOOL_ARCH_ELF
+
+#define R_NONE R_X86_64_NONE
+
+#endif /* _OBJTOOL_ARCH_ELF */
index 5fbb90a..5e0d70a 100644 (file)
@@ -12,6 +12,7 @@
 #include "check.h"
 #include "special.h"
 #include "warn.h"
+#include "arch_elf.h"
 
 #include <linux/hashtable.h>
 #include <linux/kernel.h>
@@ -766,6 +767,24 @@ static int add_call_destinations(struct objtool_file *file)
                        insn->call_dest = rela->sym;
 
                /*
+                * Many compilers cannot disable KCOV with a function attribute,
+                * so they need a little help: NOP out any KCOV calls from noinstr
+                * text.
+                */
+               if (insn->sec->noinstr &&
+                   !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
+                       if (rela) {
+                               rela->type = R_NONE;
+                               elf_write_rela(file->elf, rela);
+                       }
+
+                       elf_write_insn(file->elf, insn->sec,
+                                      insn->offset, insn->len,
+                                      arch_nop_insn(insn->len));
+                       insn->type = INSN_NOP;
+               }
+
+               /*
                 * Whatever stack impact regular CALLs have, should be undone
                 * by the RETURN of the called function.
                 *
@@ -2190,10 +2209,36 @@ static inline const char *call_dest_name(struct instruction *insn)
        return "{dynamic}";
 }
 
+static inline bool noinstr_call_dest(struct symbol *func)
+{
+       /*
+        * We can't deal with indirect function calls at present;
+        * assume they're instrumented.
+        */
+       if (!func)
+               return false;
+
+       /*
+        * If the symbol is from a noinstr section, we're good.
+        */
+       if (func->sec->noinstr)
+               return true;
+
+       /*
+        * The __ubsan_handle_*() calls are like WARN(); they only happen when
+        * something 'BAD' happened. At the risk of taking the machine down,
+        * let them proceed to get the message out.
+        */
+       if (!strncmp(func->name, "__ubsan_handle_", 15))
+               return true;
+
+       return false;
+}
+
 static int validate_call(struct instruction *insn, struct insn_state *state)
 {
        if (state->noinstr && state->instr <= 0 &&
-           (!insn->call_dest || !insn->call_dest->sec->noinstr)) {
+           !noinstr_call_dest(insn->call_dest)) {
                WARN_FUNC("call to %s() leaves .noinstr.text section",
                                insn->sec, insn->offset, call_dest_name(insn));
                return 1;
@@ -2740,13 +2785,13 @@ int check(const char *_objname, bool orc)
 
        objname = _objname;
 
-       file.elf = elf_open_read(objname, orc ? O_RDWR : O_RDONLY);
+       file.elf = elf_open_read(objname, O_RDWR);
        if (!file.elf)
                return 1;
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
-       file.c_file = find_section_by_name(file.elf, ".comment");
+       file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
        file.ignore_unreachables = no_unreachable;
        file.hints = false;
 
@@ -2801,7 +2846,9 @@ int check(const char *_objname, bool orc)
                ret = create_orc_sections(&file);
                if (ret < 0)
                        goto out;
+       }
 
+       if (file.elf->changed) {
                ret = elf_write(file.elf);
                if (ret < 0)
                        goto out;
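
Taken together, the check.c changes handle calls out of .noinstr.text in two ways: __sanitizer_cov_*() calls are rewritten into NOPs (their relocation cleared to R_NONE and the bytes patched with elf_write_insn()), and every other call is accepted only when its destination is itself noinstr or one of the __ubsan_handle_*() handlers. A small sketch of the two name-based checks, using plain structs rather than objtool's symbol types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for objtool's symbol plus its section flag. */
struct demo_sym {
	const char *name;
	bool noinstr;		/* symbol lives in a noinstr section */
};

static bool is_kcov_call(const struct demo_sym *dest)
{
	return dest && !strncmp(dest->name, "__sanitizer_cov_", 16);
}

static bool noinstr_call_dest_ok(const struct demo_sym *dest)
{
	if (!dest)
		return false;	/* indirect call: assume it is instrumented */
	if (dest->noinstr)
		return true;
	if (!strncmp(dest->name, "__ubsan_handle_", 15))
		return true;	/* diagnostics are allowed to fire */
	return false;
}

int main(void)
{
	struct demo_sym cov   = { "__sanitizer_cov_trace_pc", false };
	struct demo_sym ubsan = { "__ubsan_handle_out_of_bounds", false };
	struct demo_sym other = { "printk", false };

	printf("cov:   nop=%d ok=%d\n", is_kcov_call(&cov), noinstr_call_dest_ok(&cov));
	printf("ubsan: nop=%d ok=%d\n", is_kcov_call(&ubsan), noinstr_call_dest_ok(&ubsan));
	printf("other: nop=%d ok=%d\n", is_kcov_call(&other), noinstr_call_dest_ok(&other));
	return 0;
}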
index 8422567..26d11d8 100644 (file)
@@ -529,8 +529,9 @@ static int read_relas(struct elf *elf)
                        rela->addend = rela->rela.r_addend;
                        rela->offset = rela->rela.r_offset;
                        symndx = GELF_R_SYM(rela->rela.r_info);
-                       rela->sym = find_symbol_by_index(elf, symndx);
                        rela->sec = sec;
+                       rela->idx = i;
+                       rela->sym = find_symbol_by_index(elf, symndx);
                        if (!rela->sym) {
                                WARN("can't find rela entry symbol %d for %s",
                                     symndx, sec->name);
@@ -713,6 +714,8 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
        elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
 
+       elf->changed = true;
+
        return sec;
 }
 
@@ -746,7 +749,7 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base)
        return sec;
 }
 
-int elf_rebuild_rela_section(struct section *sec)
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec)
 {
        struct rela *rela;
        int nr, idx = 0, size;
@@ -763,6 +766,9 @@ int elf_rebuild_rela_section(struct section *sec)
                return -1;
        }
 
+       sec->changed = true;
+       elf->changed = true;
+
        sec->data->d_buf = relas;
        sec->data->d_size = size;
 
@@ -779,7 +785,44 @@ int elf_rebuild_rela_section(struct section *sec)
        return 0;
 }
 
-int elf_write(const struct elf *elf)
+int elf_write_insn(struct elf *elf, struct section *sec,
+                  unsigned long offset, unsigned int len,
+                  const char *insn)
+{
+       Elf_Data *data = sec->data;
+
+       if (data->d_type != ELF_T_BYTE || data->d_off) {
+               WARN("write to unexpected data for section: %s", sec->name);
+               return -1;
+       }
+
+       memcpy(data->d_buf + offset, insn, len);
+       elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
+
+       elf->changed = true;
+
+       return 0;
+}
+
+int elf_write_rela(struct elf *elf, struct rela *rela)
+{
+       struct section *sec = rela->sec;
+
+       rela->rela.r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+       rela->rela.r_addend = rela->addend;
+       rela->rela.r_offset = rela->offset;
+
+       if (!gelf_update_rela(sec->data, rela->idx, &rela->rela)) {
+               WARN_ELF("gelf_update_rela");
+               return -1;
+       }
+
+       elf->changed = true;
+
+       return 0;
+}
+
+int elf_write(struct elf *elf)
 {
        struct section *sec;
        Elf_Scn *s;
@@ -796,6 +839,8 @@ int elf_write(const struct elf *elf)
                                WARN_ELF("gelf_update_shdr");
                                return -1;
                        }
+
+                       sec->changed = false;
                }
        }
 
@@ -808,6 +853,8 @@ int elf_write(const struct elf *elf)
                return -1;
        }
 
+       elf->changed = false;
+
        return 0;
 }
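
The elf.c side of the patch adds dirty tracking: every helper that mutates the in-memory ELF (elf_create_section(), elf_rebuild_rela_section(), elf_write_insn(), elf_write_rela()) sets elf->changed, and check() above only writes the file back when that flag is set. A tiny sketch of the pattern with invented types, just to show the write-back guard:

#include <stdbool.h>
#include <stdio.h>

struct demo_elf {
	bool changed;
	/* ... sections, symbols, relocations ... */
};

/* Any mutation marks the object dirty. */
static void demo_patch(struct demo_elf *elf)
{
	/* ... modify section data or a relocation ... */
	elf->changed = true;
}

static void demo_write(struct demo_elf *elf)
{
	printf("flushing ELF to disk\n");
	elf->changed = false;
}

int main(void)
{
	struct demo_elf elf = { .changed = false };

	demo_patch(&elf);
	if (elf.changed)	/* write back only when something was modified */
		demo_write(&elf);
	return 0;
}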
 
index f4fe1d6..7324e77 100644 (file)
@@ -64,9 +64,10 @@ struct rela {
        GElf_Rela rela;
        struct section *sec;
        struct symbol *sym;
-       unsigned int type;
        unsigned long offset;
+       unsigned int type;
        int addend;
+       int idx;
        bool jump_table_start;
 };
 
@@ -76,6 +77,7 @@ struct elf {
        Elf *elf;
        GElf_Ehdr ehdr;
        int fd;
+       bool changed;
        char *name;
        struct list_head sections;
        DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
@@ -118,7 +120,11 @@ struct elf *elf_open_read(const char *name, int flags);
 struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr);
 struct section *elf_create_rela_section(struct elf *elf, struct section *base);
 void elf_add_rela(struct elf *elf, struct rela *rela);
-int elf_write(const struct elf *elf);
+int elf_write_insn(struct elf *elf, struct section *sec,
+                  unsigned long offset, unsigned int len,
+                  const char *insn);
+int elf_write_rela(struct elf *elf, struct rela *rela);
+int elf_write(struct elf *elf);
 void elf_close(struct elf *elf);
 
 struct section *find_section_by_name(const struct elf *elf, const char *name);
@@ -130,7 +136,7 @@ struct rela *find_rela_by_dest(const struct elf *elf, struct section *sec, unsig
 struct rela *find_rela_by_dest_range(const struct elf *elf, struct section *sec,
                                     unsigned long offset, unsigned int len);
 struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-int elf_rebuild_rela_section(struct section *sec);
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec);
 
 #define for_each_sec(file, sec)                                                \
        list_for_each_entry(sec, &file->elf->sections, list)
index c954998..4c37f80 100644 (file)
@@ -222,7 +222,7 @@ int create_orc_sections(struct objtool_file *file)
                }
        }
 
-       if (elf_rebuild_rela_section(ip_relasec))
+       if (elf_rebuild_rela_section(file->elf, ip_relasec))
                return -1;
 
        return 0;
index 839ef52..6ce4512 100644 (file)
@@ -641,6 +641,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                        }
                        evsel->core.attr.freq = 0;
                        evsel->core.attr.sample_period = 1;
+                       evsel->no_aux_samples = true;
                        intel_pt_evsel = evsel;
                        opts->full_auxtrace = true;
                }
index e108d90..a37e791 100644 (file)
@@ -852,20 +852,20 @@ static int record__open(struct record *rec)
         * event synthesis.
         */
        if (opts->initial_delay || target__has_cpu(&opts->target)) {
-               if (perf_evlist__add_dummy(evlist))
-                       return -ENOMEM;
+               pos = perf_evlist__get_tracking_event(evlist);
+               if (!evsel__is_dummy_event(pos)) {
+                       /* Set up dummy event. */
+                       if (perf_evlist__add_dummy(evlist))
+                               return -ENOMEM;
+                       pos = evlist__last(evlist);
+                       perf_evlist__set_tracking_event(evlist, pos);
+               }
 
-               /* Disable tracking of mmaps on lead event. */
-               pos = evlist__first(evlist);
-               pos->tracking = 0;
-               /* Set up dummy event. */
-               pos = evlist__last(evlist);
-               pos->tracking = 1;
                /*
                 * Enable the dummy event when the process is forked for
                 * initial_delay, immediately for system wide.
                 */
-               if (opts->initial_delay)
+               if (opts->initial_delay && !pos->immediate)
                        pos->core.attr.enable_on_exec = 1;
                else
                        pos->immediate = 1;
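
The record__open() change reuses the existing tracking event when it is already a dummy event instead of unconditionally appending a new one, and only requests enable_on_exec when the event is not marked immediate. A rough sketch of that decision with simplified stand-in types (not perf's real evsel API):

#include <stdbool.h>
#include <stdio.h>

struct demo_evsel {
	bool dummy;
	bool immediate;
	bool enable_on_exec;
};

static void setup_tracking(struct demo_evsel *tracking, bool initial_delay)
{
	if (!tracking->dummy) {
		/* perf would add a new dummy event here and make it the tracking event */
		tracking->dummy = true;
	}

	if (initial_delay && !tracking->immediate)
		tracking->enable_on_exec = true;	/* enable when the workload execs */
	else
		tracking->immediate = true;		/* enable right away */
}

int main(void)
{
	struct demo_evsel ev = { .dummy = false, .immediate = false };

	setup_tracking(&ev, true);
	printf("enable_on_exec=%d immediate=%d\n", ev.enable_on_exec, ev.immediate);
	return 0;
}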
index 181d65e..4474577 100644 (file)
@@ -462,7 +462,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *sess
                return -EINVAL;
 
        if (PRINT_FIELD(IREGS) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(UREGS) &&
index 7bd73a9..d187e46 100644 (file)
@@ -1055,7 +1055,7 @@ def cbr(id, raw_buf):
        cbr = data[0]
        MHz = (data[4] + 500) / 1000
        percent = ((cbr * 1000 / data[2]) + 5) / 10
-       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
+       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
        cbr_file.write(value)
 
 def mwait(id, raw_buf):
index 26d7be7..7daa8bb 100755 (executable)
@@ -768,7 +768,8 @@ class CallGraphModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " GROUP BY comm_id, thread_id, call_path_id"
                                                " ORDER BY comm_id, thread_id, call_path_id")
 
@@ -963,7 +964,8 @@ class CallTreeModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " ORDER BY comm_id, thread_id, call_time, calls.id")
 
        def FindPath(self, query):
@@ -1050,6 +1052,7 @@ class TreeWindowBase(QMdiSubWindow):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1127,6 +1130,7 @@ class CallTreeWindow(TreeWindowBase):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1139,6 +1143,7 @@ class CallTreeWindow(TreeWindowBase):
                                return
                        last_child = None
                        for row in xrange(n):
+                               self.view.setExpanded(parent, True)
                                child = self.model.index(row, 0, parent)
                                child_call_time = child.internalPointer().call_time
                                if child_call_time < time:
@@ -1151,9 +1156,11 @@ class CallTreeWindow(TreeWindowBase):
                        if not last_child:
                                if not found:
                                        child = self.model.index(0, 0, parent)
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                return
                        found = True
+                       self.view.setExpanded(parent, True)
                        self.view.setCurrentIndex(last_child)
                        parent = last_child
 
index 61f3be9..6578001 100755 (executable)
@@ -17,6 +17,7 @@
 from __future__ import print_function
 import sys
 import os
+import io
 import argparse
 import json
 
@@ -81,7 +82,7 @@ class FlameGraphCLI:
 
         if self.args.format == "html":
             try:
-                with open(self.args.template) as f:
+                with io.open(self.args.template, encoding="utf-8") as f:
                     output_str = f.read().replace("/** @flamegraph_json **/",
                                                   json_str)
             except IOError as e:
@@ -93,11 +94,12 @@ class FlameGraphCLI:
             output_fn = self.args.output or "stacks.json"
 
         if output_fn == "-":
-            sys.stdout.write(output_str)
+            with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+                out.write(output_str)
         else:
             print("dumping data to {}".format(output_fn))
             try:
-                with open(output_fn, "w") as out:
+                with io.open(output_fn, "w", encoding="utf-8") as out:
                     out.write(output_str)
             except IOError as e:
                 print("Error writing output file: {}".format(e), file=sys.stderr)
index f98a118..be9c4c0 100644 (file)
@@ -2288,6 +2288,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
        return browser->he_selection->thread;
 }
 
+static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
+{
+       return browser->he_selection ? browser->he_selection->res_samples : NULL;
+}
+
 /* Check whether the browser is for 'top' or 'report' */
 static inline bool is_report_browser(void *timer)
 {
@@ -3357,16 +3362,16 @@ skip_annotation:
                                             &options[nr_options], NULL, NULL, evsel);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_NORMAL);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_NORMAL);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_ASM);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_ASM);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_SOURCE);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_SOURCE);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
 skip_scripting:
@@ -3598,6 +3603,23 @@ static int __perf_evlist__tui_browse_hists(struct evlist *evlist,
                                    hbt, warn_lost_event);
 }
 
+static bool perf_evlist__single_entry(struct evlist *evlist)
+{
+       int nr_entries = evlist->core.nr_entries;
+
+       if (nr_entries == 1)
+              return true;
+
+       if (nr_entries == 2) {
+               struct evsel *last = evlist__last(evlist);
+
+               if (evsel__is_dummy_event(last))
+                       return true;
+       }
+
+       return false;
+}
+
 int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  float min_pcnt,
@@ -3608,7 +3630,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
        int nr_entries = evlist->core.nr_entries;
 
 single_entry:
-       if (nr_entries == 1) {
+       if (perf_evlist__single_entry(evlist)) {
                struct evsel *first = evlist__first(evlist);
 
                return perf_evsel__hists_browse(first, nr_entries, help,
index 173b4f0..ab48be4 100644 (file)
@@ -1566,6 +1566,18 @@ void perf_evlist__to_front(struct evlist *evlist,
        list_splice(&move, &evlist->core.entries);
 }
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->tracking)
+                       return evsel;
+       }
+
+       return evlist__first(evlist);
+}
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel)
 {
index b6f325d..a8081df 100644 (file)
@@ -335,6 +335,7 @@ void perf_evlist__to_front(struct evlist *evlist,
        evlist__cpu_iter_start(evlist);                 \
        perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist);
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel);
 
index 96e5171..ef802f6 100644 (file)
@@ -898,12 +898,6 @@ static void evsel__apply_config_terms(struct evsel *evsel,
        }
 }
 
-static bool is_dummy_event(struct evsel *evsel)
-{
-       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
-              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
-}
-
 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
 {
        struct evsel_config_term *term, *found_term = NULL;
@@ -1020,12 +1014,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                evsel__config_callchain(evsel, opts, callchain);
 
-       if (opts->sample_intr_regs) {
+       if (opts->sample_intr_regs && !evsel->no_aux_samples) {
                attr->sample_regs_intr = opts->sample_intr_regs;
                evsel__set_sample_bit(evsel, REGS_INTR);
        }
 
-       if (opts->sample_user_regs) {
+       if (opts->sample_user_regs && !evsel->no_aux_samples) {
                attr->sample_regs_user |= opts->sample_user_regs;
                evsel__set_sample_bit(evsel, REGS_USER);
        }
@@ -1161,7 +1155,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         * The software event will trigger -EOPNOTSUPP error out,
         * if BRANCH_STACK bit is set.
         */
-       if (is_dummy_event(evsel))
+       if (evsel__is_dummy_event(evsel))
                evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
 
index 0f963c2..35e3f6d 100644 (file)
@@ -399,6 +399,12 @@ static inline bool evsel__has_br_stack(const struct evsel *evsel)
               evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
 }
 
+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 struct perf_env *evsel__env(struct evsel *evsel);
 
 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
index df59fd4..e0af36b 100644 (file)
 #include <stdbool.h>
 #include <stddef.h>
 #include <limits.h>
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
 
 static inline size_t hash_bits(size_t h, int bits)
 {
        /* shuffle bits and return requested number of upper bits */
-       return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+       /* LP64 case */
+       return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+       return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+#      error "Unsupported size_t size"
+#endif
 }
 
 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
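
The hash_bits() rewrite selects the multiplicative (Fibonacci) hashing constant from the width of size_t instead of relying on __WORDSIZE: the 64-bit golden-ratio constant when size_t is as wide as long long, the 32-bit one otherwise. A self-contained sketch of the same selection (it uses the GCC/Clang predefined __SIZEOF_* macros; the fallback assumes size_t is no wider than long):

#include <stddef.h>
#include <stdio.h>

static inline size_t demo_hash_bits(size_t h, int bits)
{
	/* Multiply by a golden-ratio constant and keep the top 'bits' bits. */
#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
#else
	return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
#endif
}

int main(void)
{
	size_t key = 123456789;

	/* Bucket index for a table with 2^10 buckets. */
	printf("bucket = %zu\n", demo_hash_bits(key, 10));
	return 0;
}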
index e4dd8bf..cb3c1e5 100644 (file)
@@ -1735,6 +1735,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        u64 sample_type = evsel->core.attr.sample_type;
        u64 id = evsel->core.id[0];
        u8 cpumode;
+       u64 regs[8 * sizeof(sample.intr_regs.mask)];
 
        if (intel_pt_skip_event(pt))
                return 0;
@@ -1784,8 +1785,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        }
 
        if (sample_type & PERF_SAMPLE_REGS_INTR &&
-           items->mask[INTEL_PT_GP_REGS_POS]) {
-               u64 regs[sizeof(sample.intr_regs.mask)];
+           (items->mask[INTEL_PT_GP_REGS_POS] ||
+            items->mask[INTEL_PT_XMM_POS])) {
                u64 regs_mask = evsel->core.attr.sample_regs_intr;
                u64 *pos;
 
index eec23fa..83844f8 100644 (file)
@@ -47,7 +47,7 @@ static int transfer_size;
 static int iterations;
 static int interval = 5; /* interval in seconds for showing transfer rate */
 
-uint8_t default_tx[] = {
+static uint8_t default_tx[] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -56,8 +56,8 @@ uint8_t default_tx[] = {
        0xF0, 0x0D,
 };
 
-uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
-char *input_tx;
+static uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+static char *input_tx;
 
 static void hex_dump(const void *src, size_t length, size_t line_size,
                     char *prefix)
@@ -461,8 +461,8 @@ int main(int argc, char *argv[])
                pabort("can't get max speed hz");
 
        printf("spi mode: 0x%x\n", mode);
-       printf("bits per word: %d\n", bits);
-       printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
+       printf("bits per word: %u\n", bits);
+       printf("max speed: %u Hz (%u kHz)\n", speed, speed/1000);
 
        if (input_tx)
                transfer_escaped_string(fd, input_tx);
index 787b6d4..f9b769f 100755 (executable)
@@ -82,7 +82,9 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree,
                                        request.make_options)
        build_end = time.time()
        if not success:
-               return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+               return KunitResult(KunitStatus.BUILD_FAILURE,
+                                  'could not build kernel',
+                                  build_end - build_start)
        if not success:
                return KunitResult(KunitStatus.BUILD_FAILURE,
                                   'could not build kernel',
index e75063d..02ffc3a 100644 (file)
@@ -10,7 +10,7 @@ import collections
 import re
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
-CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$'
+CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
 
 KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
 
index 64aac9d..f13e0c0 100644 (file)
@@ -265,11 +265,9 @@ def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
        return bubble_up_errors(lambda x: x.status, test_suite_list)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       if not lines:
-               return TestResult(TestStatus.NO_TESTS, [], lines)
        consume_non_diagnositic(lines)
-       if not parse_tap_header(lines):
-               return None
+       if not lines or not parse_tap_header(lines):
+               return TestResult(TestStatus.NO_TESTS, [], lines)
        test_suites = []
        test_suite = parse_test_suite(lines)
        while test_suite:
@@ -282,6 +280,8 @@ def parse_run_tests(kernel_output) -> TestResult:
        failed_tests = 0
        crashed_tests = 0
        test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+       if test_result.status == TestStatus.NO_TESTS:
+               print_with_timestamp(red('[ERROR] ') + 'no kunit output detected')
        for test_suite in test_result.suites:
                if test_suite.status == TestStatus.SUCCESS:
                        print_suite_divider(green('[PASSED] ') + test_suite.name)
index 5bb7b11..f9eeaea 100755 (executable)
@@ -170,6 +170,17 @@ class KUnitParserTest(unittest.TestCase):
                        result.status)
                file.close()
 
+       def test_no_kunit_output(self):
+               crash_log = get_absolute_path(
+                       'test_data/test_insufficient_memory.log')
+               file = open(crash_log)
+               print_mock = mock.patch('builtins.print').start()
+               result = kunit_parser.parse_run_tests(
+                       kunit_parser.isolate_kunit_output(file.readlines()))
+               print_mock.assert_any_call(StrContains("no kunit output detected"))
+               print_mock.stop()
+               file.close()
+
        def test_crashed_test(self):
                crashed_log = get_absolute_path(
                        'test_data/test_is_test_passed-crash.log')
diff --git a/tools/testing/kunit/test_data/test_insufficient_memory.log b/tools/testing/kunit/test_data/test_insufficient_memory.log
new file mode 100644 (file)
index 0000000..e69de29
index b497cfe..ac4ad00 100644 (file)
@@ -21,10 +21,6 @@ include ../../lib.mk
 $(TEST_GEN_PROGS): $(PROGS)
        cp $(PROGS) $(OUTPUT)/
 
-clean:
-       $(CLEAN)
-       rm -f $(PROGS)
-
 # Common test-unit targets to build common-layout test-cases executables
 # Needs secondary expansion to properly include the testcase c-file in pre-reqs
 .SECONDEXPANSION:
index 83493bd..109d034 100644 (file)
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
        fentry_res = (__u64 *)fentry_skel->bss;
        fexit_res = (__u64 *)fexit_skel->bss;
        printf("%lld\n", fentry_skel->bss->test1_result);
-       for (i = 0; i < 6; i++) {
+       for (i = 0; i < 8; i++) {
                CHECK(fentry_res[i] != 1, "result",
                      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
                CHECK(fexit_res[i] != 1, "result",
index ea14e3e..f11f187 100644 (file)
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
 
        run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
-       err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
-       CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+       err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+       CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
 }
 
 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
index 15cb554..172c586 100644 (file)
@@ -1,9 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Test that the flow_dissector program can be updated with a single
- * syscall by attaching a new program that replaces the existing one.
- *
- * Corner case - the same program cannot be attached twice.
+ * Tests for attaching, detaching, and replacing flow_dissector BPF program.
  */
 
 #define _GNU_SOURCE
@@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
 
 out_detach:
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
        int err, link;
 
-       err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_attach(prog1)");
                return;
@@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
                close(link);
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2)
 
        /* Expect failure attaching prog when link exists */
        errno = 0;
-       err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(!err || errno != EEXIST))
                perror("bpf_prog_attach(prog2) expected EEXIST");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2)
 
        /* Expect failure detaching prog when link exists */
        errno = 0;
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(!err || errno != EINVAL))
                perror("bpf_prog_detach expected EINVAL");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
        }
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_detach");
                return;
@@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
        CHECK_FAIL(prog_is_attached(netns));
 }
 
+static void test_link_update_same_prog(int netns, int prog1, int prog2)
+{
+       DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+       DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+       int err, link;
+
+       link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+       if (CHECK_FAIL(link < 0)) {
+               perror("bpf_link_create(prog1)");
+               return;
+       }
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       /* Expect success updating the prog with the same one */
+       update_opts.flags = 0;
+       update_opts.old_prog_fd = 0;
+       err = bpf_link_update(link, prog1, &update_opts);
+       if (CHECK_FAIL(err))
+               perror("bpf_link_update");
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       close(link);
+       CHECK_FAIL(prog_is_attached(netns));
+}
+
 static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
 {
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
@@ -571,6 +593,8 @@ static void run_tests(int netns)
                  test_link_update_no_old_prog },
                { "link update with replace old prog",
                  test_link_update_replace_old_prog },
+               { "link update with same prog",
+                 test_link_update_same_prog },
                { "link update invalid opts",
                  test_link_update_invalid_opts },
                { "link update invalid prog",
index 2061a6b..5f54c6a 100644 (file)
@@ -13,6 +13,7 @@ static int getsetsockopt(void)
                char cc[16]; /* TCP_CA_NAME_MAX */
        } buf = {};
        socklen_t optlen;
+       char *big_buf = NULL;
 
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
@@ -22,24 +23,31 @@ static int getsetsockopt(void)
 
        /* IP_TOS - BPF bypass */
 
-       buf.u8[0] = 0x08;
-       err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1);
+       optlen = getpagesize() * 2;
+       big_buf = calloc(1, optlen);
+       if (!big_buf) {
+               log_err("Couldn't allocate two pages");
+               goto err;
+       }
+
+       *(int *)big_buf = 0x08;
+       err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen);
        if (err) {
                log_err("Failed to call setsockopt(IP_TOS)");
                goto err;
        }
 
-       buf.u8[0] = 0x00;
+       memset(big_buf, 0, optlen);
        optlen = 1;
-       err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen);
+       err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen);
        if (err) {
                log_err("Failed to call getsockopt(IP_TOS)");
                goto err;
        }
 
-       if (buf.u8[0] != 0x08) {
-               log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08",
-                       buf.u8[0]);
+       if (*(int *)big_buf != 0x08) {
+               log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
+                       *(int *)big_buf);
                goto err;
        }
 
@@ -78,6 +86,28 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       /* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */
+
+       optlen = getpagesize() * 2;
+       memset(big_buf, 0, optlen);
+
+       err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen);
+       if (err != 0) {
+               log_err("Failed to call setsockopt, ret=%d", err);
+               goto err;
+       }
+
+       err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen);
+       if (err != 0) {
+               log_err("Failed to call getsockopt, ret=%d", err);
+               goto err;
+       }
+
+       if (optlen != 1 || *(__u8 *)big_buf != 0x55) {
+               log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x",
+                       optlen, *(__u8 *)big_buf);
+       }
+
        /* SO_SNDBUF is overwritten */
 
        buf.u32 = 0x01010101;
@@ -124,9 +154,11 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       free(big_buf);
        close(fd);
        return 0;
 err:
+       free(big_buf);
        close(fd);
        return -1;
 }
index 7897c8f..ef57408 100644 (file)
@@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
index e7b8753..75ecf95 100644 (file)
@@ -25,7 +25,7 @@ struct bpf_iter__netlink {
        struct netlink_sock *sk;
 } __attribute__((preserve_access_index));
 
-static inline struct inode *SOCK_INODE(struct socket *socket)
+static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
 {
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
index 9365b68..5f645fd 100644 (file)
@@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f)
                e == (void *)20 && f == 21;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fentry/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fentry/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
index bd1e17d..0952aff 100644 (file)
@@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
                e == (void *)20 && f == 21 && ret == 111;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test *a;
+};
+
+__u64 test7_result = 0;
+SEC("fexit/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fexit/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
index d5a5eeb..712df7b 100644 (file)
@@ -8,6 +8,10 @@
 char _license[] SEC("license") = "GPL";
 __u32 _version SEC("version") = 1;
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 #define SOL_CUSTOM                     0xdeadbeef
 
 struct sockopt_sk {
@@ -28,12 +32,14 @@ int _getsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Not interested in SOL_SOCKET:SO_SNDBUF;
@@ -51,6 +57,26 @@ int _getsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               ctx->retval = 0; /* Reset system call return value to zero */
+
+               /* Always export 0x55 */
+               optval[0] = 0x55;
+               ctx->optlen = 1;
+
+               /* Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
 
@@ -81,12 +107,14 @@ int _setsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Overwrite SO_SNDBUF value */
@@ -112,6 +140,28 @@ int _setsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               /* Original optlen is larger than PAGE_SIZE. */
+               if (ctx->optlen != PAGE_SIZE * 2)
+                       return 0; /* EPERM, unexpected data size */
+
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               /* Make sure we can trim the buffer. */
+               optval[0] = 0;
+               ctx->optlen = 1;
+
+              * Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
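
Both sockopt diffs exercise the same constraint: a cgroup getsockopt/setsockopt BPF program only sees the first PAGE_SIZE bytes of optval, so it must bounds-check against optval_end and may shrink optlen (or set it to 0 to pass a large buffer through untouched). A compact sketch of the bounds-check idiom, written as ordinary C over a fake context rather than a loadable BPF program:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096

struct demo_sockopt_ctx {
	unsigned char *optval;
	unsigned char *optval_end;	/* at most optval + PAGE_SIZE */
	int optlen;
};

/* Return 0 to reject (EPERM) or 1 to allow, like the BPF hooks. */
static int demo_getsockopt(struct demo_sockopt_ctx *ctx)
{
	if (ctx->optval + 1 > ctx->optval_end)
		return 0;		/* bounds check before any access */

	ctx->optval[0] = 0x55;		/* export a fixed value */
	ctx->optlen = 1;		/* trim what userspace sees */
	return 1;
}

int main(void)
{
	unsigned char buf[DEMO_PAGE_SIZE] = { 0 };
	struct demo_sockopt_ctx ctx = {
		.optval = buf,
		.optval_end = buf + sizeof(buf),
		.optlen = 2 * DEMO_PAGE_SIZE,	/* userspace passed a bigger buffer */
	};

	if (demo_getsockopt(&ctx))
		printf("optlen=%d optval[0]=0x%x\n", ctx.optlen, ctx.optval[0]);
	return 0;
}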
 
index 057036c..3dca4c2 100644 (file)
@@ -79,7 +79,7 @@ struct {
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
-       __uint(max_entries, 2);
+       __uint(max_entries, 3);
        __type(key, int);
        __type(value, int);
 } sock_skb_opts SEC(".maps");
@@ -94,6 +94,12 @@ struct {
 SEC("sk_skb1")
 int bpf_prog1(struct __sk_buff *skb)
 {
+       int *f, two = 2;
+
+       f = bpf_map_lookup_elem(&sock_skb_opts, &two);
+       if (f && *f) {
+               return *f;
+       }
        return skb->len;
 }
 
index 3308112..0ac0864 100644 (file)
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
 /* valid program on DEVMAP entry via SEC name;
  * has access to egress and ingress ifindex
  */
-SEC("xdp_devmap")
+SEC("xdp_devmap/map_prog")
 int xdp_dummy_dm(struct xdp_md *ctx)
 {
        char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
index 6a12a0e..754cf61 100644 (file)
@@ -789,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data)
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
-       if (err) {
+       if (!err) {
                printf("Failed empty parser prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty verdict prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty msg verdict prog detach\n");
                goto out_sockmap;
        }
@@ -1090,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data)
                assert(status == 0);
        }
 
-       err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
        if (!err) {
                printf("Detached an invalid prog type.\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+       err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
index 37695fc..78789b2 100644 (file)
@@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
 int txmsg_ktls_skb_redir;
 int ktls;
 int peek_flag;
+int skb_use_parser;
 
 static const struct option long_options[] = {
        {"help",        no_argument,            NULL, 'h' },
@@ -174,6 +175,7 @@ static void test_reset(void)
        txmsg_apply = txmsg_cork = 0;
        txmsg_ingress = txmsg_redir_skb = 0;
        txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+       skb_use_parser = 0;
 }
 
 static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
@@ -1211,6 +1213,11 @@ run:
                }
        }
 
+       if (skb_use_parser) {
+               i = 2;
+               err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
+       }
+
        if (txmsg_drop)
                options->drop_expected = true;
 
@@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
        test_send(opt, cgrp);
 }
 
+static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
+{
+       txmsg_pass = 1;
+       skb_use_parser = 512;
+       opt->iov_length = 256;
+       opt->iov_count = 1;
+       opt->rate = 2;
+       test_exec(cgrp, opt);
+}
+
 char *map_names[] = {
        "sock_map",
        "sock_map_txmsg",
@@ -1748,6 +1765,7 @@ struct _test test[] = {
        {"txmsg test pull-data", test_txmsg_pull},
        {"txmsg test pop-data", test_txmsg_pop},
        {"txmsg test push/pop data", test_txmsg_push_pop},
+       {"txmsg test ingress parser", test_txmsg_ingress_parser},
 };
 
 static int check_whitelist(struct _test *t, struct sockmap_options *opt)
index 3702dbc..c82aa77 100755 (executable)
@@ -63,6 +63,8 @@ ALL_TESTS="$ALL_TESTS 0008:150:1"
 ALL_TESTS="$ALL_TESTS 0009:150:1"
 ALL_TESTS="$ALL_TESTS 0010:1:1"
 ALL_TESTS="$ALL_TESTS 0011:1:1"
+ALL_TESTS="$ALL_TESTS 0012:1:1"
+ALL_TESTS="$ALL_TESTS 0013:1:1"
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -470,6 +472,38 @@ kmod_test_0011()
        echo "$MODPROBE" > /proc/sys/kernel/modprobe
 }
 
+kmod_check_visibility()
+{
+       local name="$1"
+       local cmd="$2"
+
+       modprobe $DEFAULT_KMOD_DRIVER
+
+       local priv=$(eval $cmd)
+       local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
+
+       if [ "$priv" = "$unpriv" ] || \
+          [ "${priv:0:3}" = "0x0" ] || \
+          [ "${unpriv:0:3}" != "0x0" ] ; then
+               echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
+               exit 1
+       else
+               echo "${FUNCNAME[0]}: OK!"
+       fi
+}
+
+kmod_test_0012()
+{
+       kmod_check_visibility /proc/modules \
+               "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'"
+}
+
+kmod_test_0013()
+{
+       kmod_check_visibility '/sys/module/*/sections/*' \
+               "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1"
+}
+
 list_tests()
 {
        echo "Test ID list:"
@@ -489,6 +523,8 @@ list_tests()
        echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
        echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path"
        echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading"
+       echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG"
+       echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG"
 }
 
 usage()
index 0ac49d9..862eee7 100644 (file)
@@ -36,7 +36,7 @@ struct ksft_count {
 static struct ksft_count ksft_cnt;
 static unsigned int ksft_plan;
 
-static inline int ksft_test_num(void)
+static inline unsigned int ksft_test_num(void)
 {
        return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
                ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
index dee567f..22dc2f3 100755 (executable)
@@ -747,6 +747,19 @@ ipv6_fcnal_runtime()
        run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
        run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
 
+       # rpfilter and default route
+       $IP nexthop flush >/dev/null 2>&1
+       run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
+       run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1"
+       run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
+       run_cmd "$IP nexthop add id 93 group 91/92"
+       run_cmd "$IP -6 ro add default nhid 91"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with default route and rpfilter"
+       run_cmd "$IP -6 ro replace default nhid 93"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with multipath default route and rpfilter"
+
        # TO-DO:
        # existing route with old nexthop; append route with new nexthop
        # existing route with old nexthop; replace route with new
index 383bac0..ceaad78 100644 (file)
@@ -15,8 +15,9 @@
 #include <inttypes.h>
 #include <linux/net_tstamp.h>
 #include <linux/errqueue.h>
+#include <linux/if_ether.h>
 #include <linux/ipv6.h>
-#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
 {
        char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
                     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
-       char data[sizeof(struct ipv6hdr) +
-                 sizeof(struct tcphdr) + 1];
+       char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+                 sizeof(struct udphdr) + 1];
        struct sock_extended_err *err;
        struct msghdr msg = {0};
        struct iovec iov = {0};
@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
        msg.msg_controllen = sizeof(control);
 
        while (1) {
+               const char *reason;
+
                ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
                if (ret == -1 && errno == EAGAIN)
                        break;
@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
                err = (struct sock_extended_err *)CMSG_DATA(cm);
                if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
                        error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
-               if (err->ee_code != ECANCELED)
-                       error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
+
+               switch (err->ee_errno) {
+               case ECANCELED:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
+                               error(1, 0, "errqueue: unknown ECANCELED %u\n",
+                                     err->ee_code);
+                       reason = "missed txtime";
+               break;
+               case EINVAL:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
+                               error(1, 0, "errqueue: unknown EINVAL %u\n",
+                                     err->ee_code);
+                       reason = "invalid txtime";
+               break;
+               default:
+                       error(1, 0, "errqueue: errno %u code %u\n",
+                             err->ee_errno, err->ee_code);
+               };
 
                tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
                tstamp -= (int64_t) glob_tstart;
                tstamp /= 1000 * 1000;
-               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
-                               data[ret - 1], tstamp);
+               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
+                       data[ret - 1], tstamp, reason);
 
                msg.msg_flags = 0;
                msg.msg_controllen = sizeof(control);
index 9c0f758..a179f0d 100644 (file)
@@ -3,7 +3,7 @@
 
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
-       nft_concat_range.sh \
+       nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh
 
 LDLIBS = -lmnl
diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
new file mode 100755 (executable)
index 0000000..edf0a48
--- /dev/null
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# This tests connection tracking helper assignment:
+# 1. can attach ftp helper to a connection from nft ruleset.
+# 2. auto-assign still works.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+testipv6=1
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without conntrack tool"
+       exit $ksft_skip
+fi
+
+which nc >/dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without netcat tool"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${ns1} type veth peer name veth0 netns ${ns2} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set veth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set veth0 up
+
+ip -net ${ns1} addr add 10.0.1.1/24 dev veth0
+ip -net ${ns1} addr add dead:1::1/64 dev veth0
+
+ip -net ${ns2} addr add 10.0.1.2/24 dev veth0
+ip -net ${ns2} addr add dead:1::2/64 dev veth0
+
+load_ruleset_family() {
+       local family=$1
+       local ns=$2
+
+ip netns exec ${ns} nft -f - <<EOF
+table $family raw {
+       ct helper ftp {
+             type "ftp" protocol tcp
+        }
+       chain pre {
+               type filter hook prerouting priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+}
+EOF
+       return $?
+}
+
+check_for_helper()
+{
+       local netns=$1
+       local message=$2
+       local port=$3
+
+       ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+       if [ $? -ne 0 ] ; then
+               echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+               ret=1
+               return 1
+       fi
+       echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+       return 0
+}
+
+test_helper()
+{
+       local port=$1
+       local msg=$2
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ip $msg" $port
+       check_for_helper "$ns2" "ip $msg" $port
+
+       wait
+
+       if [ $testipv6 -eq 0 ] ;then
+               return 0
+       fi
+
+       ip netns exec ${ns1} conntrack -F 2> /dev/null
+       ip netns exec ${ns2} conntrack -F 2> /dev/null
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ipv6 $msg" $port
+       check_for_helper "$ns2" "ipv6 $msg" $port
+
+       wait
+}
+
+load_ruleset_family ip ${ns1}
+if [ $? -ne 0 ];then
+       echo "FAIL: ${ns1} cannot load ip ruleset" 1>&2
+       exit 1
+fi
+
+load_ruleset_family ip6 ${ns1}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns1} cannot load ip6 ruleset" 1>&2
+       testipv6=0
+fi
+
+load_ruleset_family inet ${ns2}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns2} cannot load inet ruleset" 1>&2
+       load_ruleset_family ip ${ns2}
+       if [ $? -ne 0 ];then
+               echo "FAIL: ${ns2} cannot load ip ruleset" 1>&2
+               exit 1
+       fi
+
+       if [ $testipv6 -eq 1 ] ;then
+               load_ruleset_family ip6 ${ns2}
+               if [ $? -ne 0 ];then
+                       echo "FAIL: ${ns2} cannot load ip6 ruleset" 1>&2
+                       exit 1
+               fi
+       fi
+fi
+
+test_helper 2121 "set via ruleset"
+ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 "auto-assign"
+
+exit $ret
index c1921a5..8d728ed 100644 (file)
@@ -95,4 +95,9 @@ static inline int sys_pidfd_getfd(int pidfd, int fd, int flags)
        return syscall(__NR_pidfd_getfd, pidfd, fd, flags);
 }
 
+static inline int sys_memfd_create(const char *name, unsigned int flags)
+{
+       return syscall(__NR_memfd_create, name, flags);
+}
+
 #endif /* __PIDFD_H */
index 401a7c1..84b65ec 100644 (file)
@@ -34,11 +34,6 @@ static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1,
        return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
 }
 
-static int sys_memfd_create(const char *name, unsigned int flags)
-{
-       return syscall(__NR_memfd_create, name, flags);
-}
-
 static int __child(int sk, int memfd)
 {
        int ret;
index 133ec5b..9418108 100644 (file)
@@ -470,4 +470,16 @@ TEST_F(current_nsset, no_foul_play)
        }
 }
 
+TEST(setns_einval)
+{
+       int fd;
+
+       fd = sys_memfd_create("rostock", 0);
+       EXPECT_GT(fd, 0);
+
+       ASSERT_NE(setns(fd, 0), 0);
+       EXPECT_EQ(errno, EINVAL);
+       close(fd);
+}
+
 TEST_HARNESS_MAIN
index 6ee0fde..7c23d3d 100644 (file)
@@ -698,13 +698,13 @@ restart_nx:
 
        switch (cc) {
 
-       case ERR_NX_TRANSLATION:
+       case ERR_NX_AT_FAULT:
 
                /* We touched the pages ahead of time.  In the most common case
                 * we shouldn't be here.  But maybe some pages were paged out.
                 * Kernel should have placed the faulting address to fsaddr.
                 */
-               NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n",
+               NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n",
                              (void *)cmdp->crb.csb.fsaddr));
 
                if (pgfault_retries == NX_MAX_FAULTS) {
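
The comment above relies on the caller having pre-faulted the input and output buffers, because the NX engine can only report a translation fault (ERR_NX_AT_FAULT) rather than resolve it. A hedged, self-contained sketch of that "touch the pages ahead of time" idea; the function name, stride handling and types are assumptions, not the selftest's actual helper.

#include <stddef.h>

/* Read one byte per page so the buffer is resident before the NX
 * engine operates on it and reports a fault it cannot service. */
static void touch_pages(const void *buf, size_t len, size_t page_size)
{
	const volatile char *p = buf;
	size_t i;

	for (i = 0; i < len; i += page_size)
		(void)p[i];
	if (len)
		(void)p[len - 1];	/* cover a partial trailing page */
}
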
index 7496a83..02dffb6 100644 (file)
@@ -306,13 +306,13 @@ int compress_file(int argc, char **argv, void *handle)
                        lzcounts, cmdp, handle);
 
                if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC &&
-                   cc != ERR_NX_TRANSLATION) {
+                   cc != ERR_NX_AT_FAULT) {
                        fprintf(stderr, "nx error: cc= %d\n", cc);
                        exit(-1);
                }
 
                /* Page faults are handled by the user code */
-               if (cc == ERR_NX_TRANSLATION) {
+               if (cc == ERR_NX_AT_FAULT) {
                        NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc));
                        NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n",
                                  fault_tries,
index ca35dd8..af3df79 100644 (file)
@@ -7,7 +7,7 @@ noarg:
 # The EBB handler is 64-bit code and everything links against it
 CFLAGS += -m64
 
-TMPOUT = $(OUTPUT)/
+TMPOUT = $(OUTPUT)/TMPDIR/
 # Toolchains may build PIE by default which breaks the assembly
 no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
         $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
index 47a3082..503982b 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 12345",
+        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 123456",
         "expExitCode": "255",
         "verifyCmd": "$TC action ls action bpf",
-        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 12345",
+        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 123456",
         "matchCount": "0",
         "teardown": [
             "$TC action flush action bpf"
index 88ec134..072febf 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action csum",
         "matchPattern": "^[ \t]+index [0-9]* ref",
                 1,
                 255
             ],
-            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
+            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
         ],
         "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
         "expExitCode": "0",
index fbeb919..d063469 100644 (file)
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 824212:80:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:4224:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288428822.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
                 1,
                 255
             ],
-            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie 123456"
         ],
-        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie 123456",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie 123456",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
index 6630627..3e5ff29 100755 (executable)
@@ -1,15 +1,10 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpm0 ] || exit $ksft_skip
+[ -e /dev/tpm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SmokeTest
-python -m unittest -v tpm2_tests.AsyncTest
-
-CLEAR_CMD=$(which tpm2_clear)
-if [ -n $CLEAR_CMD ]; then
-       tpm2_clear -T device
-fi
+python3 -m unittest -v tpm2_tests.SmokeTest
+python3 -m unittest -v tpm2_tests.AsyncTest
index 36c9d03..04c47b1 100755 (executable)
@@ -1,9 +1,9 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpmrm0 ] || exit $ksft_skip
+[ -e /dev/tpmrm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SpaceTest
+python3 -m unittest -v tpm2_tests.SpaceTest
index d0fcb66..f34486c 100644 (file)
@@ -247,14 +247,14 @@ class ProtocolError(Exception):
 class AuthCommand(object):
     """TPMS_AUTH_COMMAND"""
 
-    def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
-                 hmac=''):
+    def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(),
+                 session_attributes=0, hmac=bytes()):
         self.session_handle = session_handle
         self.nonce = nonce
         self.session_attributes = session_attributes
         self.hmac = hmac
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
         return struct.pack(fmt, self.session_handle, len(self.nonce),
                            self.nonce, self.session_attributes, len(self.hmac),
@@ -268,11 +268,11 @@ class AuthCommand(object):
 class SensitiveCreate(object):
     """TPMS_SENSITIVE_CREATE"""
 
-    def __init__(self, user_auth='', data=''):
+    def __init__(self, user_auth=bytes(), data=bytes()):
         self.user_auth = user_auth
         self.data = data
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
         return struct.pack(fmt, len(self.user_auth), self.user_auth,
                            len(self.data), self.data)
@@ -296,8 +296,9 @@ class Public(object):
         return '>HHIH%us%usH%us' % \
             (len(self.auth_policy), len(self.parameters), len(self.unique))
 
-    def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
-                 parameters='', unique=''):
+    def __init__(self, object_type, name_alg, object_attributes,
+                 auth_policy=bytes(), parameters=bytes(),
+                 unique=bytes()):
         self.object_type = object_type
         self.name_alg = name_alg
         self.object_attributes = object_attributes
@@ -305,7 +306,7 @@ class Public(object):
         self.parameters = parameters
         self.unique = unique
 
-    def __str__(self):
+    def __bytes__(self):
         return struct.pack(self.__fmt(),
                            self.object_type,
                            self.name_alg,
@@ -343,7 +344,7 @@ def get_algorithm(name):
 
 def hex_dump(d):
     d = [format(ord(x), '02x') for x in d]
-    d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+    d = [d[i: i + 16] for i in range(0, len(d), 16)]
     d = [' '.join(x) for x in d]
     d = os.linesep.join(d)
 
@@ -401,7 +402,7 @@ class Client:
         pcrsel_len = max((i >> 3) + 1, 3)
         pcrsel = [0] * pcrsel_len
         pcrsel[i >> 3] = 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IHB%us' % (pcrsel_len)
         cmd = struct.pack(fmt,
@@ -443,7 +444,7 @@ class Client:
             TPM2_CC_PCR_EXTEND,
             i,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             1, bank_alg, dig)
 
         self.send_cmd(cmd)
@@ -457,7 +458,7 @@ class Client:
                           TPM2_RH_NULL,
                           TPM2_RH_NULL,
                           16,
-                          '\0' * 16,
+                          ('\0' * 16).encode(),
                           0,
                           session_type,
                           TPM2_ALG_NULL,
@@ -472,7 +473,7 @@ class Client:
 
         for i in pcrs:
             pcr = self.read_pcr(i, bank_alg)
-            if pcr == None:
+            if pcr is None:
                 return None
             x += pcr
 
@@ -489,7 +490,7 @@ class Client:
         pcrsel = [0] * pcrsel_len
         for i in pcrs:
             pcrsel[i >> 3] |= 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IH%usIHB3s' % ds
         cmd = struct.pack(fmt,
@@ -497,7 +498,8 @@ class Client:
                           struct.calcsize(fmt),
                           TPM2_CC_POLICY_PCR,
                           handle,
-                          len(dig), str(dig),
+                          len(dig),
+                          bytes(dig),
                           1,
                           bank_alg,
                           pcrsel_len, pcrsel)
@@ -534,7 +536,7 @@ class Client:
 
         self.send_cmd(cmd)
 
-    def create_root_key(self, auth_value = ''):
+    def create_root_key(self, auth_value = bytes()):
         attributes = \
             Public.FIXED_TPM | \
             Public.FIXED_PARENT | \
@@ -570,11 +572,11 @@ class Client:
             TPM2_CC_CREATE_PRIMARY,
             TPM2_RH_OWNER,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -587,7 +589,7 @@ class Client:
         attributes = 0
         if not policy_dig:
             attributes |= Public.USER_WITH_AUTH
-            policy_dig = ''
+            policy_dig = bytes()
 
         auth_cmd =  AuthCommand()
         sensitive = SensitiveCreate(user_auth=auth_value, data=data)
@@ -608,11 +610,11 @@ class Client:
             TPM2_CC_CREATE,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         rsp = self.send_cmd(cmd)
@@ -635,7 +637,7 @@ class Client:
             TPM2_CC_LOAD,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             blob)
 
         data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -653,7 +655,7 @@ class Client:
             TPM2_CC_UNSEAL,
             data_handle,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         try:
             rsp = self.send_cmd(cmd)
@@ -675,7 +677,7 @@ class Client:
             TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
             TPM2_RH_LOCKOUT,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         self.send_cmd(cmd)
 
@@ -693,7 +695,7 @@ class Client:
         more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
         rsp = rsp[9:]
 
-        for i in xrange(0, cnt):
+        for i in range(0, cnt):
             handle = struct.unpack('>I', rsp[:4])[0]
             handles.append(handle)
             rsp = rsp[4:]
index 728be7c..9d76430 100644 (file)
@@ -20,8 +20,8 @@ class SmokeTest(unittest.TestCase):
         self.client.close()
 
     def test_seal_with_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
 
         blob = self.client.seal(self.root_key, data, auth, None)
         result = self.client.unseal(self.root_key, blob, auth, None)
@@ -30,8 +30,8 @@ class SmokeTest(unittest.TestCase):
     def test_seal_with_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
         pcrs = [16]
 
         try:
@@ -58,14 +58,15 @@ class SmokeTest(unittest.TestCase):
         self.assertEqual(data, result)
 
     def test_unseal_with_wrong_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 20
+        data = ('X' * 64).encode()
+        auth = ('A' * 20).encode()
         rc = 0
 
         blob = self.client.seal(self.root_key, data, auth, None)
         try:
-            result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
-        except ProtocolError, e:
+            result = self.client.unseal(self.root_key, blob,
+                        auth[:-1] + 'B'.encode(), None)
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
@@ -73,8 +74,8 @@ class SmokeTest(unittest.TestCase):
     def test_unseal_with_wrong_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 17
+        data = ('X' * 64).encode()
+        auth = ('A' * 17).encode()
         pcrs = [16]
 
         try:
@@ -91,7 +92,7 @@ class SmokeTest(unittest.TestCase):
         # This should succeed.
 
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        self.client.extend_pcr(1, 'X' * ds)
+        self.client.extend_pcr(1, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -108,7 +109,7 @@ class SmokeTest(unittest.TestCase):
 
         # Then, extend a PCR that is part of the policy and try to unseal.
         # This should fail.
-        self.client.extend_pcr(16, 'X' * ds)
+        self.client.extend_pcr(16, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -119,7 +120,7 @@ class SmokeTest(unittest.TestCase):
             self.client.policy_password(handle)
 
             result = self.client.unseal(self.root_key, blob, auth, handle)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
             self.client.flush_context(handle)
         except:
@@ -130,13 +131,13 @@ class SmokeTest(unittest.TestCase):
 
     def test_seal_with_too_long_auth(self):
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        data = 'X' * 64
-        auth = 'A' * (ds + 1)
+        data = ('X' * 64).encode()
+        auth = ('A' * (ds + 1)).encode()
 
         rc = 0
         try:
             blob = self.client.seal(self.root_key, data, auth, None)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
@@ -152,7 +153,7 @@ class SmokeTest(unittest.TestCase):
                               0xDEADBEEF)
 
             self.client.send_cmd(cmd)
-        except IOError, e:
+        except IOError as e:
             rejected = True
         except:
             pass
@@ -212,7 +213,7 @@ class SmokeTest(unittest.TestCase):
             self.client.tpm.write(cmd)
             rsp = self.client.tpm.read()
 
-        except IOError, e:
+        except IOError as e:
             # read the response
             rsp = self.client.tpm.read()
             rejected = True
@@ -283,7 +284,7 @@ class SpaceTest(unittest.TestCase):
         rc = 0
         try:
             space1.send_cmd(cmd)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
index 17a1f53..d77f482 100755 (executable)
@@ -587,9 +587,20 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
 declare -A objects
 while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
-       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
        objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
 done < /dev/kmsg
 alldeleted=1
index 5f16821..d2796ea 100644 (file)
@@ -70,10 +70,10 @@ all_64: $(BINARIES_64)
 
 EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
 
-$(BINARIES_32): $(OUTPUT)/%_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
 
-$(BINARIES_64): $(OUTPUT)/%_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
        $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 # x86_64 users should be encouraged to install 32-bit libraries
diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h
new file mode 100644 (file)
index 0000000..f5ff2a2
--- /dev/null
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_HELPERS_H
+#define __SELFTESTS_X86_HELPERS_H
+
+#include <asm/processor-flags.h>
+
+static inline unsigned long get_eflags(void)
+{
+       unsigned long eflags;
+
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushfq\n\t"
+               "popq %0\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushfl\n\t"
+               "popl %0"
+#endif
+               : "=r" (eflags) :: "memory");
+
+       return eflags;
+}
+
+static inline void set_eflags(unsigned long eflags)
+{
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushq %0\n\t"
+               "popfq\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushl %0\n\t"
+               "popfl"
+#endif
+               :: "r" (eflags) : "flags", "memory");
+}
+
+#endif /* __SELFTESTS_X86_HELPERS_H */
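
The pushf/popf pairs adjust %rsp by 128 bytes first, presumably to stay clear of the x86-64 red zone that the surrounding C code may be using below the stack pointer. A minimal, hedged usage sketch (a standalone program, not one of the selftests):

#include <stdio.h>
#include "helpers.h"

int main(void)
{
	unsigned long flags = get_eflags();

	/* IF is expected to read back as set from user space. */
	printf("eflags = %#lx, IF %s\n", flags,
	       (flags & X86_EFLAGS_IF) ? "set" : "clear");
	set_eflags(flags);	/* writing the same value back is a no-op */
	return 0;
}
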
index 1063328..120ac74 100644 (file)
@@ -31,6 +31,8 @@
 #include <sys/ptrace.h>
 #include <sys/user.h>
 
+#include "helpers.h"
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -67,21 +69,6 @@ static unsigned char altstack_data[SIGSTKSZ];
 # define INT80_CLOBBERS
 #endif
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t*)ctx_void;
index bc0ecc2..5b7abeb 100644 (file)
 #include <setjmp.h>
 #include <errno.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 /* Our sigaltstack scratch space. */
 static unsigned char altstack_data[SIGSTKSZ];
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
index 02309a1..a108b80 100644 (file)
 #include <signal.h>
 #include <err.h>
 #include <sys/syscall.h>
-#include <asm/processor-flags.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 static unsigned int nerrs;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -59,6 +41,7 @@ static void do_it(unsigned long extraflags)
        set_eflags(get_eflags() | extraflags);
        syscall(SYS_getpid);
        flags = get_eflags();
+       set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
        if ((flags & extraflags) == extraflags) {
                printf("[OK]\tThe syscall worked and flags are still set\n");
        } else {
@@ -73,6 +56,12 @@ int main(void)
        printf("[RUN]\tSet NT and issue a syscall\n");
        do_it(X86_EFLAGS_NT);
 
+       printf("[RUN]\tSet AC and issue a syscall\n");
+       do_it(X86_EFLAGS_AC);
+
+       printf("[RUN]\tSet NT|AC and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
+
        /*
         * Now try it again with TF set -- TF forces returns via IRET in all
         * cases except non-ptregs-using 64-bit full fast path syscalls.
@@ -80,8 +69,28 @@ int main(void)
 
        sethandler(SIGTRAP, sigtrap, 0);
 
+       printf("[RUN]\tSet TF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF);
+
        printf("[RUN]\tSet NT|TF and issue a syscall\n");
        do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
 
+       printf("[RUN]\tSet AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       /*
+        * Now try DF.  This is evil and it's plausible that we will crash
+        * glibc, but glibc would have to do something rather surprising
+        * for this to happen.
+        */
+       printf("[RUN]\tSet DF and issue a syscall\n");
+       do_it(X86_EFLAGS_DF);
+
+       printf("[RUN]\tSet TF|DF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
+
        return nerrs == 0 ? 0 : 1;
 }
index a4f4d4c..c41f24b 100644 (file)
@@ -20,6 +20,8 @@
 #include <setjmp.h>
 #include <sys/uio.h>
 
+#include "helpers.h"
+
 #ifdef __x86_64__
 # define VSYS(x) (x)
 #else
@@ -493,21 +495,8 @@ static int test_process_vm_readv(void)
 }
 
 #ifdef __x86_64__
-#define X86_EFLAGS_TF (1UL << 8)
 static volatile sig_atomic_t num_vsyscall_traps;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
-}
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t *)ctx_void;
index 0075ccd..4c311e1 100644 (file)
@@ -11,6 +11,8 @@
 #include <features.h>
 #include <stdio.h>
 
+#include "helpers.h"
+
 #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
 
 int main()
@@ -53,27 +55,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                err(1, "sigaction");
 }
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
-
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static volatile sig_atomic_t nerrs;
 static unsigned long sysinfo;
 static bool got_sysinfo = false;
index 6683b4a..caab980 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <errno.h>
@@ -135,10 +136,4 @@ static inline void free_page(unsigned long addr)
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 #endif /* KERNEL_H */
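
With the stubs gone, the userspace build picks up the real intrusive-list helpers from the tools copy of linux/list.h. A small, hedged sketch of that API; the struct and variable names are illustrative and it assumes the tools/include list implementation is on the include path.

#include <linux/list.h>
#include <stdio.h>

struct item {
	int value;
	struct list_head node;
};

int main(void)
{
	struct list_head items;
	struct item a = { .value = 1 }, b = { .value = 2 }, *it;

	INIT_LIST_HEAD(&items);
	list_add_tail(&a.node, &items);	/* queue in FIFO order */
	list_add_tail(&b.node, &items);
	list_for_each_entry(it, &items, node)
		printf("%d\n", it->value);
	list_del(&a.node);		/* unlink without freeing */
	return 0;
}
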
index b751350..5d90254 100644 (file)
@@ -11,12 +11,11 @@ struct device {
 struct virtio_device {
        struct device dev;
        u64 features;
+       struct list_head vqs;
 };
 
 struct virtqueue {
-       /* TODO: commented as list macros are empty stubs for now.
-        * Broken but enough for virtio_ring.c
-        * struct list_head list; */
+       struct list_head list;
        void (*callback)(struct virtqueue *vq);
        const char *name;
        struct virtio_device *vdev;
index b427def..cb3f29c 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <getopt.h>
+#include <limits.h>
 #include <string.h>
 #include <poll.h>
 #include <sys/eventfd.h>
@@ -18,6 +19,8 @@
 #include <linux/virtio_ring.h>
 #include "../../drivers/vhost/test.h"
 
+#define RANDOM_BATCH -1
+
 /* Unused */
 void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
 
@@ -43,6 +46,10 @@ struct vdev_info {
        struct vhost_memory *mem;
 };
 
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+                                    backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
+
 bool vq_notify(struct virtqueue *vq)
 {
        struct vq_info *info = vq->priv;
@@ -88,6 +95,19 @@ void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
        assert(r >= 0);
 }
 
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+       if (info->vq)
+               vring_del_virtqueue(info->vq);
+
+       memset(info->ring, 0, vring_size(num, 4096));
+       vring_init(&info->vring, num, info->ring, 4096);
+       info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
+                                        false, vq_notify, vq_callback, "test");
+       assert(info->vq);
+       info->vq->priv = info;
+}
+
 static void vq_info_add(struct vdev_info *dev, int num)
 {
        struct vq_info *info = &dev->vqs[dev->nvqs];
@@ -97,14 +117,7 @@ static void vq_info_add(struct vdev_info *dev, int num)
        info->call = eventfd(0, EFD_NONBLOCK);
        r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
        assert(r >= 0);
-       memset(info->ring, 0, vring_size(num, 4096));
-       vring_init(&info->vring, num, info->ring, 4096);
-       info->vq = vring_new_virtqueue(info->idx,
-                                      info->vring.num, 4096, &dev->vdev,
-                                      true, false, info->ring,
-                                      vq_notify, vq_callback, "test");
-       assert(info->vq);
-       info->vq->priv = info;
+       vq_reset(info, num, &dev->vdev);
        vhost_vq_setup(dev, info);
        dev->fds[info->idx].fd = info->call;
        dev->fds[info->idx].events = POLLIN;
@@ -116,6 +129,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
        int r;
        memset(dev, 0, sizeof *dev);
        dev->vdev.features = features;
+       INIT_LIST_HEAD(&dev->vdev.vqs);
        dev->buf_size = 1024;
        dev->buf = malloc(dev->buf_size);
        assert(dev->buf);
@@ -152,41 +166,93 @@ static void wait_for_interrupt(struct vdev_info *dev)
 }
 
 static void run_test(struct vdev_info *dev, struct vq_info *vq,
-                    bool delayed, int bufs)
+                    bool delayed, int batch, int reset_n, int bufs)
 {
        struct scatterlist sl;
-       long started = 0, completed = 0;
-       long completed_before;
+       long started = 0, completed = 0, next_reset = reset_n;
+       long completed_before, started_before;
        int r, test = 1;
        unsigned len;
        long long spurious = 0;
+       const bool random_batch = batch == RANDOM_BATCH;
+
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
+       if (!reset_n) {
+               next_reset = INT_MAX;
+       }
+
        for (;;) {
                virtqueue_disable_cb(vq->vq);
                completed_before = completed;
+               started_before = started;
                do {
-                       if (started < bufs) {
+                       const bool reset = completed > next_reset;
+                       if (random_batch)
+                               batch = (random() % vq->vring.num) + 1;
+
+                       while (started < bufs &&
+                              (started - completed) < batch) {
                                sg_init_one(&sl, dev->buf, dev->buf_size);
                                r = virtqueue_add_outbuf(vq->vq, &sl, 1,
                                                         dev->buf + started,
                                                         GFP_ATOMIC);
-                               if (likely(r == 0)) {
-                                       ++started;
-                                       if (unlikely(!virtqueue_kick(vq->vq)))
+                               if (unlikely(r != 0)) {
+                                       if (r == -ENOSPC &&
+                                           started > started_before)
+                                               r = 0;
+                                       else
                                                r = -1;
+                                       break;
                                }
-                       } else
+
+                               ++started;
+
+                               if (unlikely(!virtqueue_kick(vq->vq))) {
+                                       r = -1;
+                                       break;
+                               }
+                       }
+
+                       if (started >= bufs)
                                r = -1;
 
+                       if (reset) {
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &no_backend);
+                               assert(!r);
+                       }
+
                        /* Flush out completed bufs if any */
-                       if (virtqueue_get_buf(vq->vq, &len)) {
+                       while (virtqueue_get_buf(vq->vq, &len)) {
                                ++completed;
                                r = 0;
                        }
 
+                       if (reset) {
+                               struct vhost_vring_state s = { .index = 0 };
+
+                               vq_reset(vq, vq->vring.num, &dev->vdev);
+
+                               r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+                                         &s);
+                               assert(!r);
+
+                               s.num = 0;
+                               r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+                                         &null_state);
+                               assert(!r);
+
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &backend);
+                               assert(!r);
+
+                               started = completed;
+                               while (completed > next_reset)
+                                       next_reset += completed;
+                       }
                } while (r == 0);
-               if (completed == completed_before)
+               if (completed == completed_before && started == started_before)
                        ++spurious;
                assert(completed <= bufs);
                assert(started <= bufs);
@@ -203,7 +269,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
        test = 0;
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
-       fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+       fprintf(stderr,
+               "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+               spurious, started, completed);
 }
 
 const char optstring[] = "h";
@@ -245,6 +313,16 @@ const struct option longopts[] = {
                .val = 'd',
        },
        {
+               .name = "batch",
+               .val = 'b',
+               .has_arg = required_argument,
+       },
+       {
+               .name = "reset",
+               .val = 'r',
+               .has_arg = optional_argument,
+       },
+       {
        }
 };
 
@@ -255,6 +333,8 @@ static void help(void)
                " [--no-event-idx]"
                " [--no-virtio-1]"
                " [--delayed-interrupt]"
+               " [--batch=random/N]"
+               " [--reset=N]"
                "\n");
 }
 
@@ -263,6 +343,7 @@ int main(int argc, char **argv)
        struct vdev_info dev;
        unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+       long batch = 1, reset = 0;
        int o;
        bool delayed = false;
 
@@ -289,6 +370,24 @@ int main(int argc, char **argv)
                case 'D':
                        delayed = true;
                        break;
+               case 'b':
+                       if (0 == strcmp(optarg, "random")) {
+                               batch = RANDOM_BATCH;
+                       } else {
+                               batch = strtol(optarg, NULL, 10);
+                               assert(batch > 0);
+                               assert(batch < (long)INT_MAX + 1);
+                       }
+                       break;
+               case 'r':
+                       if (!optarg) {
+                               reset = 1;
+                       } else {
+                               reset = strtol(optarg, NULL, 10);
+                               assert(reset > 0);
+                               assert(reset < (long)INT_MAX + 1);
+                       }
+                       break;
                default:
                        assert(0);
                        break;
@@ -298,6 +397,6 @@ int main(int argc, char **argv)
 done:
        vdev_info_init(&dev, features);
        vq_info_add(&dev, 256);
-       run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+       run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
        return 0;
 }
index 2936534..fa87b58 100644 (file)
@@ -307,6 +307,7 @@ static int parallel_test(u64 features,
                close(to_host[0]);
 
                gvdev.vdev.features = features;
+               INIT_LIST_HEAD(&gvdev.vdev.vqs);
                gvdev.to_host_fd = to_host[1];
                gvdev.notifies = 0;
 
@@ -453,6 +454,7 @@ int main(int argc, char *argv[])
 
        getrange = getrange_iov;
        vdev.features = 0;
+       INIT_LIST_HEAD(&vdev.vqs);
 
        while (argv[1]) {
                if (strcmp(argv[1], "--indirect") == 0)
index a852af5..0a68c9d 100644 (file)
@@ -3350,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
                        if (kvm_sigmask.len != sizeof(compat_sigset_t))
                                goto out;
                        r = -EFAULT;
-                       if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+                       if (get_compat_sigset(&sigset,
+                                             (compat_sigset_t __user *)sigmask_arg->sigset))
                                goto out;
                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
                } else