Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Mar 2019 22:00:28 +0000 (15:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Mar 2019 22:00:28 +0000 (15:00 -0700)
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - some cleanups
   - direct physical timer assignment
   - cache sanitization for 32-bit guests

  s390:
   - interrupt cleanup
   - introduction of the Guest Information Block
   - preparation for processor subfunctions in cpu models

  PPC:
   - bug fixes and improvements, especially related to machine checks
     and protection keys

  x86:
   - many, many cleanups, including removing a bunch of MMU code for
     unnecessary optimizations
   - AVIC fixes

  Generic:
   - memcg accounting"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (147 commits)
  kvm: vmx: fix formatting of a comment
  KVM: doc: Document the life cycle of a VM and its resources
  MAINTAINERS: Add KVM selftests to existing KVM entry
  Revert "KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()"
  KVM: PPC: Book3S: Add count cache flush parameters to kvmppc_get_cpu_char()
  KVM: PPC: Fix compilation when KVM is not enabled
  KVM: Minor cleanups for kvm_main.c
  KVM: s390: add debug logging for cpu model subfunctions
  KVM: s390: implement subfunction processor calls
  arm64: KVM: Fix architecturally invalid reset value for FPEXC32_EL2
  KVM: arm/arm64: Remove unused timer variable
  KVM: PPC: Book3S: Improve KVM reference counting
  KVM: PPC: Book3S HV: Fix build failure without IOMMU support
  Revert "KVM: Eliminate extra function calls in kvm_get_dirty_log_protect()"
  x86: kvmguest: use TSC clocksource if invariant TSC is exposed
  KVM: Never start grow vCPU halt_poll_ns from value below halt_poll_ns_grow_start
  KVM: Expose the initial start value in grow_halt_poll_ns() as a module parameter
  KVM: grow_halt_poll_ns() should never shrink vCPU halt_poll_ns
  KVM: x86/mmu: Consolidate kvm_mmu_zap_all() and kvm_mmu_zap_mmio_sptes()
  KVM: x86/mmu: WARN if zapping a MMIO spte results in zapping children
  ...

17 files changed:
1  2 
MAINTAINERS
arch/arm/include/asm/arch_gicv3.h
arch/arm/include/asm/kvm_host.h
arch/arm/kvm/coproc.c
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/sys_regs.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
drivers/clocksource/arm_arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/kvm_main.c

diff --combined MAINTAINERS
@@@ -331,7 -331,6 +331,7 @@@ ACPI APE
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  M:    Len Brown <lenb@kernel.org>
  L:    linux-acpi@vger.kernel.org
 +R:    James Morse <james.morse@arm.com>
  R:    Tony Luck <tony.luck@intel.com>
  R:    Borislav Petkov <bp@alien8.de>
  F:    drivers/acpi/apei/
@@@ -366,7 -365,6 +366,7 @@@ M: Lorenzo Pieralisi <lorenzo.pieralisi
  M:    Hanjun Guo <hanjun.guo@linaro.org>
  M:    Sudeep Holla <sudeep.holla@arm.com>
  L:    linux-acpi@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    drivers/acpi/arm64
  
@@@ -411,7 -409,8 +411,7 @@@ F: drivers/platform/x86/wmi.
  F:    include/uapi/linux/wmi.h
  
  AD1889 ALSA SOUND DRIVER
 -M:    Thibaut Varene <T-Bone@parisc-linux.org>
 -W:    http://wiki.parisc-linux.org/AD1889
 +W:    https://parisc.wiki.kernel.org/index.php/AD1889
  L:    linux-parisc@vger.kernel.org
  S:    Maintained
  F:    sound/pci/ad1889.*
@@@ -767,13 -766,6 +767,13 @@@ S:       Supporte
  F:    Documentation/hwmon/fam15h_power
  F:    drivers/hwmon/fam15h_power.c
  
 +AMD FCH GPIO DRIVER
 +M:    Enrico Weigelt, metux IT consult <info@metux.net>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-amd-fch.c
 +F:    include/linux/platform_data/gpio/gpio-amd-fch.h
 +
  AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
  L:    linux-geode@lists.infradead.org (moderated for non-subscribers)
  S:    Orphan
@@@ -862,22 -854,6 +862,22 @@@ S:       Supporte
  F:    drivers/iio/adc/ad7124.c
  F:    Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt
  
 +ANALOG DEVICES INC AD7606 DRIVER
 +M:    Stefan Popa <stefan.popa@analog.com>
 +L:    linux-iio@vger.kernel.org
 +W:    http://ez.analog.com/community/linux-device-drivers
 +S:    Supported
 +F:    drivers/iio/adc/ad7606.c
 +F:    Documentation/devicetree/bindings/iio/adc/ad7606.txt
 +
 +ANALOG DEVICES INC AD7768-1 DRIVER
 +M:    Stefan Popa <stefan.popa@analog.com>
 +L:    linux-iio@vger.kernel.org
 +W:    http://ez.analog.com/community/linux-device-drivers
 +S:    Supported
 +F:    drivers/iio/adc/ad7768-1.c
 +F:    Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
 +
  ANALOG DEVICES INC AD9389B DRIVER
  M:    Hans Verkuil <hans.verkuil@cisco.com>
  L:    linux-media@vger.kernel.org
@@@ -1059,30 -1035,28 +1059,30 @@@ L:   netdev@vger.kernel.or
  S:    Odd fixes
  F:    drivers/net/appletalk/
  F:    net/appletalk/
 +F:    include/linux/atalk.h
 +F:    include/uapi/linux/atalk.h
  
  APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT
 -M:    Duc Dang <dhdang@apm.com>
 +M:    Khuong Dinh <khuong@os.amperecomputing.com>
  S:    Supported
  F:    arch/arm64/boot/dts/apm/
  
  APPLIED MICRO (APM) X-GENE SOC EDAC
 -M:    Loc Ho <lho@apm.com>
 +M:    Khuong Dinh <khuong@os.amperecomputing.com>
  S:    Supported
  F:    drivers/edac/xgene_edac.c
  F:    Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
  
  APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
 -M:    Iyappan Subramanian <isubramanian@apm.com>
 -M:    Keyur Chudgar <kchudgar@apm.com>
 +M:    Iyappan Subramanian <iyappan@os.amperecomputing.com>
 +M:    Keyur Chudgar <keyur@os.amperecomputing.com>
  S:    Supported
  F:    drivers/net/ethernet/apm/xgene-v2/
  
  APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
 -M:    Iyappan Subramanian <isubramanian@apm.com>
 -M:    Keyur Chudgar <kchudgar@apm.com>
 -M:    Quan Nguyen <qnguyen@apm.com>
 +M:    Iyappan Subramanian <iyappan@os.amperecomputing.com>
 +M:    Keyur Chudgar <keyur@os.amperecomputing.com>
 +M:    Quan Nguyen <quan@os.amperecomputing.com>
  S:    Supported
  F:    drivers/net/ethernet/apm/xgene/
  F:    drivers/net/phy/mdio-xgene.c
@@@ -1090,7 -1064,7 +1090,7 @@@ F:      Documentation/devicetree/bindings/ne
  F:    Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
  
  APPLIED MICRO (APM) X-GENE SOC PMU
 -M:    Tai Nguyen <ttnguyen@apm.com>
 +M:    Khuong Dinh <khuong@os.amperecomputing.com>
  S:    Supported
  F:    drivers/perf/xgene_pmu.c
  F:    Documentation/perf/xgene-pmu.txt
@@@ -1159,26 -1133,13 +1159,26 @@@ S:   Supporte
  F:    drivers/gpu/drm/arm/hdlcd_*
  F:    Documentation/devicetree/bindings/display/arm,hdlcd.txt
  
 +ARM KOMEDA DRM-KMS DRIVER
 +M:    James (Qian) Wang <james.qian.wang@arm.com>
 +M:    Liviu Dudau <liviu.dudau@arm.com>
 +L:    Mali DP Maintainers <malidp@foss.arm.com>
 +S:    Supported
 +T:    git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
 +F:    drivers/gpu/drm/arm/display/include/
 +F:    drivers/gpu/drm/arm/display/komeda/
 +F:    Documentation/devicetree/bindings/display/arm/arm,komeda.txt
 +F:    Documentation/gpu/komeda-kms.rst
 +
  ARM MALI-DP DRM DRIVER
  M:    Liviu Dudau <liviu.dudau@arm.com>
  M:    Brian Starkey <brian.starkey@arm.com>
 -M:    Mali DP Maintainers <malidp@foss.arm.com>
 +L:    Mali DP Maintainers <malidp@foss.arm.com>
  S:    Supported
 +T:    git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
  F:    drivers/gpu/drm/arm/
  F:    Documentation/devicetree/bindings/display/arm,malidp.txt
 +F:    Documentation/gpu/afbc.rst
  
  ARM MFM AND FLOPPY DRIVERS
  M:    Ian Molton <spyro@f2s.com>
@@@ -1198,7 -1159,7 +1198,7 @@@ F:      arch/arm*/include/asm/hw_breakpoint.
  F:    arch/arm*/include/asm/perf_event.h
  F:    drivers/perf/*
  F:    include/linux/perf/arm_pmu.h
 -F:    Documentation/devicetree/bindings/arm/pmu.txt
 +F:    Documentation/devicetree/bindings/arm/pmu.yaml
  F:    Documentation/devicetree/bindings/perf/
  
  ARM PORT
@@@ -1411,13 -1372,6 +1411,13 @@@ F:    arch/arm/mach-aspeed
  F:    arch/arm/boot/dts/aspeed-*
  N:    aspeed
  
 +ARM/BITMAIN ARCHITECTURE
 +M:    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    arch/arm64/boot/dts/bitmain/
 +F:    Documentation/devicetree/bindings/arm/bitmain.yaml
 +
  ARM/CALXEDA HIGHBANK ARCHITECTURE
  M:    Rob Herring <robh@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1577,14 -1531,21 +1577,14 @@@ ARM/FREESCALE IMX / MXC ARM ARCHITECTUR
  M:    Shawn Guo <shawnguo@kernel.org>
  M:    Sascha Hauer <s.hauer@pengutronix.de>
  R:    Pengutronix Kernel Team <kernel@pengutronix.de>
 -R:    Fabio Estevam <fabio.estevam@nxp.com>
 +R:    Fabio Estevam <festevam@gmail.com>
  R:    NXP Linux Team <linux-imx@nxp.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 -F:    arch/arm/mach-imx/
 -F:    arch/arm/mach-mxs/
 -F:    arch/arm/boot/dts/imx*
 -F:    arch/arm/configs/imx*_defconfig
 -F:    arch/arm64/boot/dts/freescale/imx*
 -F:    drivers/clk/imx/
 -F:    drivers/firmware/imx/
 -F:    drivers/soc/imx/
 -F:    include/linux/firmware/imx/
 -F:    include/soc/imx/
 +N:    imx
 +N:    mxs
 +X:    drivers/media/i2c/
  
  ARM/FREESCALE VYBRID ARM ARCHITECTURE
  M:    Shawn Guo <shawnguo@kernel.org>
@@@ -1776,7 -1737,6 +1776,7 @@@ F:      arch/arm/configs/mvebu_*_defconfi
  F:    arch/arm/mach-mvebu/
  F:    arch/arm64/boot/dts/marvell/armada*
  F:    drivers/cpufreq/armada-37xx-cpufreq.c
 +F:    drivers/cpufreq/armada-8k-cpufreq.c
  F:    drivers/cpufreq/mvebu-cpufreq.c
  F:    drivers/irqchip/irq-armada-370-xp.c
  F:    drivers/irqchip/irq-mvebu-*
@@@ -1922,11 -1882,10 +1922,11 @@@ F:   drivers/usb/host/ehci-w90x900.
  F:    drivers/video/fbdev/nuc900fb.c
  
  ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 -M:    Nelson Castillo <arhuaco@freaks-unidos.net>
  L:    openmoko-kernel@lists.openmoko.org (subscribers-only)
  W:    http://wiki.openmoko.org/wiki/Neo_FreeRunner
 -S:    Supported
 +S:    Orphan
 +F:    arch/arm/mach-s3c24xx/mach-gta02.c
 +F:    arch/arm/mach-s3c24xx/gta02.h
  
  ARM/Orion SoC/Technologic Systems TS-78xx platform support
  M:    Alexander Clouter <alex@digriz.org.uk>
@@@ -1989,37 -1948,19 +1989,37 @@@ M:   David Brown <david.brown@linaro.org
  L:    linux-arm-msm@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/soc/qcom/
 +F:    Documentation/devicetree/bindings/*/qcom*
  F:    arch/arm/boot/dts/qcom-*.dts
  F:    arch/arm/boot/dts/qcom-*.dtsi
  F:    arch/arm/mach-qcom/
 -F:    arch/arm64/boot/dts/qcom/*
 +F:    arch/arm64/boot/dts/qcom/
 +F:    drivers/*/qcom/
 +F:    drivers/*/qcom*
 +F:    drivers/*/*/qcom/
 +F:    drivers/*/*/qcom*
 +F:    drivers/*/pm8???-*
 +F:    drivers/bluetooth/btqcomsmd.c
 +F:    drivers/clocksource/timer-qcom.c
 +F:    drivers/extcon/extcon-qcom*
 +F:    drivers/iommu/msm*
  F:    drivers/i2c/busses/i2c-qup.c
 -F:    drivers/clk/qcom/
 -F:    drivers/dma/qcom/
 -F:    drivers/soc/qcom/
 +F:    drivers/i2c/busses/i2c-qcom-geni.c
 +F:    drivers/mfd/ssbi.c
 +F:    drivers/mmc/host/mmci_qcom*
 +F:    drivers/mmc/host/sdhci_msm.c
 +F:    drivers/pci/controller/dwc/pcie-qcom.c
 +F:    drivers/phy/qualcomm/
 +F:    drivers/power/*/msm*
 +F:    drivers/reset/reset-qcom-*
 +F:    drivers/scsi/ufs/ufs-qcom.*
  F:    drivers/spi/spi-qup.c
 +F:    drivers/spi/spi-geni-qcom.c
 +F:    drivers/spi/spi-qcom-qspi.c
  F:    drivers/tty/serial/msm_serial.c
 -F:    drivers/*/pm8???-*
 -F:    drivers/mfd/ssbi.c
 -F:    drivers/firmware/qcom_scm*
 +F:    drivers/usb/dwc3/dwc3-qcom.c
 +F:    include/dt-bindings/*/qcom*
 +F:    include/linux/*/qcom*
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git
  
  ARM/RADISYS ENP2611 MACHINE SUPPORT
@@@ -2056,7 -1997,7 +2056,7 @@@ Q:      http://patchwork.kernel.org/project/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
  S:    Supported
  F:    arch/arm64/boot/dts/renesas/
 -F:    Documentation/devicetree/bindings/arm/shmobile.txt
 +F:    Documentation/devicetree/bindings/arm/renesas.yaml
  F:    drivers/soc/renesas/
  F:    include/linux/soc/renesas/
  
@@@ -2143,9 -2084,8 +2143,9 @@@ F:      drivers/media/platform/s5p-cec
  F:    Documentation/devicetree/bindings/media/s5p-cec.txt
  
  ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
 -M:    Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 +M:    Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  M:    Jacek Anaszewski <jacek.anaszewski@gmail.com>
 +M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org
  L:    linux-media@vger.kernel.org
  S:    Maintained
@@@ -2169,8 -2109,6 +2169,8 @@@ Q:      http://patchwork.kernel.org/project/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
  S:    Supported
  F:    arch/arm/boot/dts/emev2*
 +F:    arch/arm/boot/dts/gr-peach*
 +F:    arch/arm/boot/dts/iwg20d-q7*
  F:    arch/arm/boot/dts/r7s*
  F:    arch/arm/boot/dts/r8a*
  F:    arch/arm/boot/dts/r9a*
@@@ -2178,7 -2116,7 +2178,7 @@@ F:      arch/arm/boot/dts/sh
  F:    arch/arm/configs/shmobile_defconfig
  F:    arch/arm/include/debug/renesas-scif.S
  F:    arch/arm/mach-shmobile/
 -F:    Documentation/devicetree/bindings/arm/shmobile.txt
 +F:    Documentation/devicetree/bindings/arm/renesas.yaml
  F:    drivers/soc/renesas/
  F:    include/linux/soc/renesas/
  
@@@ -2565,6 -2503,7 +2565,6 @@@ T:      git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    Documentation/devicetree/bindings/eeprom/at24.txt
  F:    drivers/misc/eeprom/at24.c
 -F:    include/linux/platform_data/at24.h
  
  ATA OVER ETHERNET (AOE) DRIVER
  M:    "Ed L. Cashin" <ed.cashin@acm.org>
@@@ -2670,7 -2609,6 +2670,7 @@@ L:      linux-kernel@vger.kernel.or
  S:    Maintained
  F:    arch/*/include/asm/atomic*.h
  F:    include/*/atomic*.h
 +F:    scripts/atomic/
  
  ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
  M:    Bradley Grove <linuxdrivers@attotech.com>
@@@ -2914,7 -2852,7 +2914,7 @@@ R:      Martin KaFai Lau <kafai@fb.com
  R:    Song Liu <songliubraving@fb.com>
  R:    Yonghong Song <yhs@fb.com>
  L:    netdev@vger.kernel.org
 -L:    linux-kernel@vger.kernel.org
 +L:    bpf@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
  Q:    https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@@ -2944,7 -2882,6 +2944,7 @@@ N:      bp
  BPF JIT for ARM
  M:    Shubham Bansal <illusionist.neo@gmail.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/arm/net/
  
@@@ -2953,21 -2890,18 +2953,21 @@@ M:   Daniel Borkmann <daniel@iogearbox.ne
  M:    Alexei Starovoitov <ast@kernel.org>
  M:    Zi Shen Lim <zlim.lnx@gmail.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Supported
  F:    arch/arm64/net/
  
  BPF JIT for MIPS (32-BIT AND 64-BIT)
  M:    Paul Burton <paul.burton@mips.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/mips/net/
  
  BPF JIT for NFP NICs
  M:    Jakub Kicinski <jakub.kicinski@netronome.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/netronome/nfp/bpf/
  
@@@ -2975,21 -2909,13 +2975,21 @@@ BPF JIT for POWERPC (32-BIT AND 64-BIT
  M:    Naveen N. Rao <naveen.n.rao@linux.ibm.com>
  M:    Sandipan Das <sandipan@linux.ibm.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/powerpc/net/
  
 +BPF JIT for RISC-V (RV64G)
 +M:    Björn Töpel <bjorn.topel@gmail.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    arch/riscv/net/
 +
  BPF JIT for S390
  M:    Martin Schwidefsky <schwidefsky@de.ibm.com>
  M:    Heiko Carstens <heiko.carstens@de.ibm.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/s390/net/
  X:    arch/s390/net/pnet.c
  BPF JIT for SPARC (32-BIT AND 64-BIT)
  M:    David S. Miller <davem@davemloft.net>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/sparc/net/
  
  BPF JIT for X86 32-BIT
  M:    Wang YanQing <udknight@gmail.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    arch/x86/net/bpf_jit_comp32.c
  
@@@ -3012,7 -2936,6 +3012,7 @@@ BPF JIT for X86 64-BI
  M:    Alexei Starovoitov <ast@kernel.org>
  M:    Daniel Borkmann <daniel@iogearbox.net>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Supported
  F:    arch/x86/net/
  X:    arch/x86/net/bpf_jit_comp32.c
@@@ -3204,7 -3127,6 +3204,7 @@@ F:      drivers/phy/broadcom/phy-brcm-usb
  BROADCOM GENET ETHERNET DRIVER
  M:    Doug Berger <opendmb@gmail.com>
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    bcm-kernel-feedback-list@broadcom.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/broadcom/genet/
@@@ -3312,7 -3234,6 +3312,7 @@@ F:      drivers/spi/spi-iproc-qspi.
  
  BROADCOM SYSTEMPORT ETHERNET DRIVER
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    bcm-kernel-feedback-list@broadcom.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bcmsysport.*
@@@ -3469,8 -3390,9 +3469,8 @@@ F:      Documentation/media/v4l-drivers/cafe
  F:    drivers/media/platform/marvell-ccic/
  
  CAIF NETWORK LAYER
 -M:    Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
  L:    netdev@vger.kernel.org
 -S:    Supported
 +S:    Orphan
  F:    Documentation/networking/caif/
  F:    drivers/net/caif/
  F:    include/uapi/linux/caif/
@@@ -3594,6 -3516,7 +3594,6 @@@ F:      include/linux/spi/cc2520.
  F:    Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
  
  CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
 -M:    Yael Chemla <yael.chemla@foss.arm.com>
  M:    Gilad Ben-Yossef <gilad@benyossef.com>
  L:    linux-crypto@vger.kernel.org
  S:    Supported
@@@ -3757,7 -3680,7 +3757,7 @@@ CHROME HARDWARE PLATFORM SUPPOR
  M:    Benson Leung <bleung@chromium.org>
  M:    Enric Balletbo i Serra <enric.balletbo@collabora.com>
  S:    Maintained
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
  F:    drivers/platform/chrome/
  
  CHROMEOS EC SUBDRIVERS
@@@ -3769,14 -3692,6 +3769,14 @@@ N:    cros_e
  N:    cros-ec
  F:    drivers/power/supply/cros_usbpd-charger.c
  
 +CHROMEOS EC CODEC DRIVER
 +M:    Cheng-Yi Chiang <cychiang@chromium.org>
 +S:    Maintained
 +R:    Enric Balletbo i Serra <enric.balletbo@collabora.com>
 +R:    Guenter Roeck <groeck@chromium.org>
 +F:    Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
 +F:    sound/soc/codecs/cros_ec_codec.*
 +
  CIRRUS LOGIC AUDIO CODEC DRIVERS
  M:    Brian Austin <brian.austin@cirrus.com>
  M:    Paul Handrigan <Paul.Handrigan@cirrus.com>
@@@ -3790,23 -3705,6 +3790,23 @@@ L:    netdev@vger.kernel.or
  S:    Maintained
  F:    drivers/net/ethernet/cirrus/ep93xx_eth.c
  
 +CIRRUS LOGIC LOCHNAGAR DRIVER
 +M:    Charles Keepax <ckeepax@opensource.cirrus.com>
 +M:    Richard Fitzgerald <rf@opensource.cirrus.com>
 +L:    patches@opensource.cirrus.com
 +S:    Supported
 +F:    drivers/clk/clk-lochnagar.c
 +F:    drivers/mfd/lochnagar-i2c.c
 +F:    drivers/pinctrl/cirrus/pinctrl-lochnagar.c
 +F:    drivers/regulator/lochnagar-regulator.c
 +F:    include/dt-bindings/clk/lochnagar.h
 +F:    include/dt-bindings/pinctrl/lochnagar.h
 +F:    include/linux/mfd/lochnagar*
 +F:    Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
 +
  CISCO FCOE HBA DRIVER
  M:    Satish Kharat <satishkh@cisco.com>
  M:    Sesidhar Baddela <sebaddel@cisco.com>
@@@ -4013,10 -3911,9 +4013,10 @@@ M:    Johannes Weiner <hannes@cmpxchg.org
  L:    cgroups@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
  S:    Maintained
 -F:    Documentation/cgroup*
 +F:    Documentation/admin-guide/cgroup-v2.rst
 +F:    Documentation/cgroup-v1/
  F:    include/linux/cgroup*
 -F:    kernel/cgroup*
 +F:    kernel/cgroup/
  
  CONTROL GROUP - CPUSET
  M:    Li Zefan <lizefan@huawei.com>
@@@ -4064,7 -3961,7 +4064,7 @@@ M:      Viresh Kumar <viresh.kumar@linaro.or
  L:    linux-pm@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 -T:    git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git (For ARM Updates)
  B:    https://bugzilla.kernel.org
  F:    Documentation/admin-guide/pm/cpufreq.rst
  F:    Documentation/admin-guide/pm/intel_pstate.rst
@@@ -4124,7 -4021,6 +4124,7 @@@ S:      Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
  B:    https://bugzilla.kernel.org
  F:    Documentation/admin-guide/pm/cpuidle.rst
 +F:    Documentation/driver-api/pm/cpuidle.rst
  F:    drivers/cpuidle/*
  F:    include/linux/cpuidle.h
  
@@@ -4232,7 -4128,7 +4232,7 @@@ S:      Maintaine
  F:    drivers/media/dvb-frontends/cxd2820r*
  
  CXGB3 ETHERNET DRIVER (CXGB3)
 -M:    Arjun Vynipadath <arjun@chelsio.com>
 +M:    Vishal Kulkarni <vishal@chelsio.com>
  L:    netdev@vger.kernel.org
  W:    http://www.chelsio.com
  S:    Supported
@@@ -4261,7 -4157,7 +4261,7 @@@ S:      Supporte
  F:    drivers/crypto/chelsio
  
  CXGB4 ETHERNET DRIVER (CXGB4)
 -M:    Arjun Vynipadath <arjun@chelsio.com>
 +M:    Vishal Kulkarni <vishal@chelsio.com>
  L:    netdev@vger.kernel.org
  W:    http://www.chelsio.com
  S:    Supported
@@@ -4645,11 -4541,10 +4645,11 @@@ S:   Maintaine
  F:    drivers/i2c/busses/i2c-diolan-u2c.c
  
  FILESYSTEM DIRECT ACCESS (DAX)
 -M:    Matthew Wilcox <willy@infradead.org>
 -M:    Ross Zwisler <zwisler@kernel.org>
 -M:    Jan Kara <jack@suse.cz>
 +M:    Dan Williams <dan.j.williams@intel.com>
 +R:    Matthew Wilcox <willy@infradead.org>
 +R:    Jan Kara <jack@suse.cz>
  L:    linux-fsdevel@vger.kernel.org
 +L:    linux-nvdimm@lists.01.org
  S:    Supported
  F:    fs/dax.c
  F:    include/linux/dax.h
@@@ -4657,9 -4552,9 +4657,9 @@@ F:      include/trace/events/fs_dax.
  
  DEVICE DIRECT ACCESS (DAX)
  M:    Dan Williams <dan.j.williams@intel.com>
 -M:    Dave Jiang <dave.jiang@intel.com>
 -M:    Ross Zwisler <zwisler@kernel.org>
  M:    Vishal Verma <vishal.l.verma@intel.com>
 +M:    Keith Busch <keith.busch@intel.com>
 +M:    Dave Jiang <dave.jiang@intel.com>
  L:    linux-nvdimm@lists.01.org
  S:    Supported
  F:    drivers/dax/
@@@ -4943,11 -4838,10 +4943,11 @@@ F:   Documentation/devicetree/bindings/di
  
  DRM DRIVER FOR MSM ADRENO GPU
  M:    Rob Clark <robdclark@gmail.com>
 +M:    Sean Paul <sean@poorly.run>
  L:    linux-arm-msm@vger.kernel.org
  L:    dri-devel@lists.freedesktop.org
  L:    freedreno@lists.freedesktop.org
 -T:    git git://people.freedesktop.org/~robclark/linux
 +T:    git https://gitlab.freedesktop.org/drm/msm.git
  S:    Maintained
  F:    drivers/gpu/drm/msm/
  F:    include/uapi/drm/msm_drm.h
@@@ -4987,7 -4881,6 +4987,7 @@@ DRM DRIVER FOR QXL VIRTUAL GP
  M:    Dave Airlie <airlied@redhat.com>
  M:    Gerd Hoffmann <kraxel@redhat.com>
  L:    virtualization@lists.linux-foundation.org
 +L:    spice-devel@lists.freedesktop.org
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
  F:    drivers/gpu/drm/qxl/
@@@ -5008,12 -4901,6 +5008,12 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/sis/
  F:    include/uapi/drm/sis_drm.h
  
 +DRM DRIVER FOR SITRONIX ST7701 PANELS
 +M:    Jagan Teki <jagan@amarulasolutions.com>
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-sitronix-st7701.c
 +F:    Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
 +
  DRM DRIVER FOR SITRONIX ST7586 PANELS
  M:    David Lechner <david@lechnology.com>
  S:    Maintained
@@@ -5030,13 -4917,6 +5030,13 @@@ DRM DRIVER FOR TDFX VIDEO CARD
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/tdfx/
  
 +DRM DRIVER FOR TPO TPG110 PANELS
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-tpo-tpg110.c
 +F:    Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
 +
  DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
  M:    Dave Airlie <airlied@redhat.com>
  R:    Sean Paul <sean@poorly.run>
@@@ -5045,16 -4925,6 +5045,16 @@@ S:    Odd Fixe
  F:    drivers/gpu/drm/udl/
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
 +DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
 +M:    Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
 +R:    Haneen Mohammed <hamohammed.sa@gmail.com>
 +R:    Daniel Vetter <daniel@ffwll.ch>
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +L:    dri-devel@lists.freedesktop.org
 +F:    drivers/gpu/drm/vkms/
 +F:    Documentation/gpu/vkms.rst
 +
  DRM DRIVER FOR VMWARE VIRTUAL GPU
  M:    "VMware Graphics" <linux-graphics-maintainer@vmware.com>
  M:    Thomas Hellstrom <thellstrom@vmware.com>
@@@ -5124,6 -4994,7 +5124,6 @@@ F:      Documentation/devicetree/bindings/di
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  DRM DRIVERS FOR BRIDGE CHIPS
 -M:    Archit Taneja <architt@codeaurora.org>
  M:    Andrzej Hajda <a.hajda@samsung.com>
  R:    Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
  S:    Maintained
@@@ -5278,7 -5149,7 +5278,7 @@@ DRM DRIVERS FOR VIVANTE GPU I
  M:    Lucas Stach <l.stach@pengutronix.de>
  R:    Russell King <linux+etnaviv@armlinux.org.uk>
  R:    Christian Gmeiner <christian.gmeiner@gmail.com>
 -L:    etnaviv@lists.freedesktop.org
 +L:    etnaviv@lists.freedesktop.org (moderated for non-subscribers)
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  F:    drivers/gpu/drm/etnaviv/
@@@ -5533,12 -5404,6 +5533,12 @@@ L:    linux-edac@vger.kernel.or
  S:    Maintained
  F:    drivers/edac/amd64_edac*
  
 +EDAC-AST2500
 +M:    Stefan Schaeckeler <sschaeck@cisco.com>
 +S:    Supported
 +F:    drivers/edac/aspeed_edac.c
 +F:    Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
 +
  EDAC-CALXEDA
  M:    Robert Richter <rric@kernel.org>
  L:    linux-edac@vger.kernel.org
@@@ -5563,7 -5428,6 +5563,7 @@@ F:      drivers/edac/thunderx_edac
  EDAC-CORE
  M:    Borislav Petkov <bp@alien8.de>
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
 +R:    James Morse <james.morse@arm.com>
  L:    linux-edac@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
@@@ -5996,7 -5860,7 +5996,7 @@@ S:      Maintaine
  F:    drivers/media/tuners/fc2580*
  
  FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 -M:    Johannes Thumshirn <jth@kernel.org>
 +M:    Hannes Reinecke <hare@suse.de>
  L:    linux-scsi@vger.kernel.org
  W:    www.Open-FCoE.org
  S:    Supported
@@@ -6023,7 -5887,6 +6023,7 @@@ L:      linux-fsdevel@vger.kernel.or
  S:    Maintained
  F:    fs/*
  F:    include/linux/fs.h
 +F:    include/linux/fs_types.h
  F:    include/uapi/linux/fs.h
  
  FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
@@@ -6166,12 -6029,6 +6166,12 @@@ L:    linuxppc-dev@lists.ozlabs.or
  S:    Maintained
  F:    drivers/dma/fsldma.*
  
 +FREESCALE ENETC ETHERNET DRIVERS
 +M:    Claudiu Manoil <claudiu.manoil@nxp.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ethernet/freescale/enetc/
 +
  FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
  M:    Claudiu Manoil <claudiu.manoil@nxp.com>
  L:    netdev@vger.kernel.org
@@@ -6235,17 -6092,15 +6235,17 @@@ FREESCALE QORIQ PTP CLOCK DRIVE
  M:    Yangbo Lu <yangbo.lu@nxp.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 +F:    drivers/net/ethernet/freescale/enetc/enetc_ptp.c
  F:    drivers/ptp/ptp_qoriq.c
 +F:    drivers/ptp/ptp_qoriq_debugfs.c
  F:    include/linux/fsl/ptp_qoriq.h
  F:    Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
  
  FREESCALE QUAD SPI DRIVER
  M:    Han Xu <han.xu@nxp.com>
 -L:    linux-mtd@lists.infradead.org
 +L:    linux-spi@vger.kernel.org
  S:    Maintained
 -F:    drivers/mtd/spi-nor/fsl-quadspi.c
 +F:    drivers/spi/spi-fsl-qspi.c
  
  FREESCALE QUICC ENGINE LIBRARY
  M:    Qiang Zhao <qiang.zhao@nxp.com>
@@@ -6296,7 -6151,7 +6296,7 @@@ FREESCALE SOC SOUND DRIVER
  M:    Timur Tabi <timur@kernel.org>
  M:    Nicolin Chen <nicoleotsuka@gmail.com>
  M:    Xiubo Li <Xiubo.Lee@gmail.com>
 -R:    Fabio Estevam <fabio.estevam@nxp.com>
 +R:    Fabio Estevam <festevam@gmail.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Maintained
@@@ -6344,10 -6199,9 +6344,10 @@@ F:    include/linux/fscache*.
  FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
  M:    Theodore Y. Ts'o <tytso@mit.edu>
  M:    Jaegeuk Kim <jaegeuk@kernel.org>
 +M:    Eric Biggers <ebiggers@kernel.org>
  L:    linux-fscrypt@vger.kernel.org
  Q:    https://patchwork.kernel.org/project/linux-fscrypt/list/
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git
 +T:    git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
  S:    Supported
  F:    fs/crypto/
  F:    include/linux/fscrypt*.h
@@@ -6793,15 -6647,6 +6793,15 @@@ F:    drivers/clocksource/h8300_*.
  F:    drivers/clk/h8300/
  F:    drivers/irqchip/irq-renesas-h8*.c
  
 +HABANALABS PCI DRIVER
 +M:    Oded Gabbay <oded.gabbay@gmail.com>
 +T:    git https://github.com/HabanaAI/linux.git
 +S:    Supported
 +F:    drivers/misc/habanalabs/
 +F:    include/uapi/misc/habanalabs.h
 +F:    Documentation/ABI/testing/sysfs-driver-habanalabs
 +F:    Documentation/ABI/testing/debugfs-driver-habanalabs
 +
  HACKRF MEDIA DRIVER
  M:    Antti Palosaari <crope@iki.fi>
  L:    linux-media@vger.kernel.org
@@@ -7159,7 -7004,7 +7159,7 @@@ M:      Haiyang Zhang <haiyangz@microsoft.co
  M:    Stephen Hemminger <sthemmin@microsoft.com>
  M:    Sasha Levin <sashal@kernel.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
 -L:    devel@linuxdriverproject.org
 +L:    linux-hyperv@vger.kernel.org
  S:    Supported
  F:    Documentation/networking/device_drivers/microsoft/netvsc.txt
  F:    arch/x86/include/asm/mshyperv.h
@@@ -7175,7 -7020,6 +7175,7 @@@ F:      drivers/net/hyperv
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/uio/uio_hv_generic.c
  F:    drivers/video/fbdev/hyperv_fb.c
 +F:    drivers/iommu/hyperv_iommu.c
  F:    net/vmw_vsock/hyperv_transport.c
  F:    include/linux/hyperv.h
  F:    include/uapi/linux/hyperv.h
@@@ -7325,7 -7169,6 +7325,7 @@@ F:      drivers/i2c/i2c-stub.
  I3C SUBSYSTEM
  M:    Boris Brezillon <bbrezillon@kernel.org>
  L:    linux-i3c@lists.infradead.org
 +C:    irc://chat.freenode.net/linux-i3c
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-i3c
@@@ -7860,6 -7703,7 +7860,6 @@@ M:      Yong Zhi <yong.zhi@intel.com
  M:    Sakari Ailus <sakari.ailus@linux.intel.com>
  M:    Bingbu Cao <bingbu.cao@intel.com>
  R:    Tian Shu Qiu <tian.shu.qiu@intel.com>
 -R:    Jian Xu Zheng <jian.xu.zheng@intel.com>
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    drivers/media/pci/intel/ipu3/
@@@ -8044,16 -7888,6 +8044,16 @@@ L:    linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-intel-mid.c
  
 +INTERCONNECT API
 +M:    Georgi Djakov <georgi.djakov@linaro.org>
 +S:    Maintained
 +F:    Documentation/interconnect/
 +F:    Documentation/devicetree/bindings/interconnect/
 +F:    drivers/interconnect/
 +F:    include/dt-bindings/interconnect/
 +F:    include/linux/interconnect-provider.h
 +F:    include/linux/interconnect.h
 +
  INVENSENSE MPU-3050 GYROSCOPE DRIVER
  M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-iio@vger.kernel.org
@@@ -8461,6 -8295,7 +8461,7 @@@ F:      include/linux/kvm
  F:    include/kvm/iodev.h
  F:    virt/kvm/*
  F:    tools/kvm/
+ F:    tools/testing/selftests/kvm/
  
  KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
  M:    Joerg Roedel <joro@8bytes.org>
@@@ -8470,29 -8305,25 +8471,25 @@@ S:   Maintaine
  F:    arch/x86/include/asm/svm.h
  F:    arch/x86/kvm/svm.c
  
- KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
+ KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
  M:    Christoffer Dall <christoffer.dall@arm.com>
  M:    Marc Zyngier <marc.zyngier@arm.com>
+ R:    James Morse <james.morse@arm.com>
+ R:    Julien Thierry <julien.thierry@arm.com>
+ R:    Suzuki K Pouloze <suzuki.poulose@arm.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    kvmarm@lists.cs.columbia.edu
  W:    http://systems.cs.columbia.edu/projects/kvm-arm
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
- S:    Supported
+ S:    Maintained
  F:    arch/arm/include/uapi/asm/kvm*
  F:    arch/arm/include/asm/kvm*
  F:    arch/arm/kvm/
- F:    virt/kvm/arm/
- F:    include/kvm/arm_*
- KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
- M:    Christoffer Dall <christoffer.dall@arm.com>
- M:    Marc Zyngier <marc.zyngier@arm.com>
- L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
- L:    kvmarm@lists.cs.columbia.edu
- S:    Maintained
  F:    arch/arm64/include/uapi/asm/kvm*
  F:    arch/arm64/include/asm/kvm*
  F:    arch/arm64/kvm/
+ F:    virt/kvm/arm/
+ F:    include/kvm/arm_*
  
  KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
  M:    James Hogan <jhogan@kernel.org>
@@@ -8572,7 -8403,7 +8569,7 @@@ F:      security/keys/encrypted-keys
  KEYS-TRUSTED
  M:    James Bottomley <jejb@linux.ibm.com>
  M:      Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 -M:    Mimi Zohar <zohar@linuxibm.com>
 +M:    Mimi Zohar <zohar@linux.ibm.com>
  L:    linux-integrity@vger.kernel.org
  L:    keyrings@vger.kernel.org
  S:    Supported
@@@ -8653,7 -8484,6 +8650,7 @@@ L7 BPF FRAMEWOR
  M:    John Fastabend <john.fastabend@gmail.com>
  M:    Daniel Borkmann <daniel@iogearbox.net>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    include/linux/skmsg.h
  F:    net/core/skmsg.c
@@@ -8815,6 -8645,7 +8812,6 @@@ S:      Maintaine
  F:    tools/lib/lockdep/
  
  LIBNVDIMM BLK: MMIO-APERTURE DRIVER
 -M:    Ross Zwisler <zwisler@kernel.org>
  M:    Dan Williams <dan.j.williams@intel.com>
  M:    Vishal Verma <vishal.l.verma@intel.com>
  M:    Dave Jiang <dave.jiang@intel.com>
@@@ -8827,6 -8658,7 +8824,6 @@@ F:      drivers/nvdimm/region_devs.
  LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
  M:    Vishal Verma <vishal.l.verma@intel.com>
  M:    Dan Williams <dan.j.williams@intel.com>
 -M:    Ross Zwisler <zwisler@kernel.org>
  M:    Dave Jiang <dave.jiang@intel.com>
  L:    linux-nvdimm@lists.01.org
  Q:    https://patchwork.kernel.org/project/linux-nvdimm/list/
@@@ -8834,6 -8666,7 +8831,6 @@@ S:      Supporte
  F:    drivers/nvdimm/btt*
  
  LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
 -M:    Ross Zwisler <zwisler@kernel.org>
  M:    Dan Williams <dan.j.williams@intel.com>
  M:    Vishal Verma <vishal.l.verma@intel.com>
  M:    Dave Jiang <dave.jiang@intel.com>
@@@ -8852,10 -8685,9 +8849,10 @@@ F:    Documentation/devicetree/bindings/pm
  
  LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
  M:    Dan Williams <dan.j.williams@intel.com>
 -M:    Ross Zwisler <zwisler@kernel.org>
  M:    Vishal Verma <vishal.l.verma@intel.com>
  M:    Dave Jiang <dave.jiang@intel.com>
 +M:    Keith Busch <keith.busch@intel.com>
 +M:    Ira Weiny <ira.weiny@intel.com>
  L:    linux-nvdimm@lists.01.org
  Q:    https://patchwork.kernel.org/project/linux-nvdimm/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@@@ -9003,10 -8835,10 +9000,10 @@@ F:   drivers/platform/x86/hp_accel.
  
  LIVE PATCHING
  M:    Josh Poimboeuf <jpoimboe@redhat.com>
 -M:    Jessica Yu <jeyu@kernel.org>
  M:    Jiri Kosina <jikos@kernel.org>
  M:    Miroslav Benes <mbenes@suse.cz>
 -R:    Petr Mladek <pmladek@suse.com>
 +M:    Petr Mladek <pmladek@suse.com>
 +R:    Joe Lawrence <joe.lawrence@redhat.com>
  S:    Maintained
  F:    kernel/livepatch/
  F:    include/linux/livepatch.h
@@@ -9015,9 -8847,8 +9012,9 @@@ F:      arch/x86/kernel/livepatch.
  F:    Documentation/livepatch/
  F:    Documentation/ABI/testing/sysfs-kernel-livepatch
  F:    samples/livepatch/
 +F:    tools/testing/selftests/livepatch/
  L:    live-patching@vger.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.git
  
  LLC (802.2)
  L:    netdev@vger.kernel.org
@@@ -9255,14 -9086,6 +9252,14 @@@ F:    drivers/gpu/drm/armada
  F:    include/uapi/drm/armada_drm.h
  F:    Documentation/devicetree/bindings/display/armada/
  
 +MARVELL ARMADA 3700 PHY DRIVERS
 +M:    Miquel Raynal <miquel.raynal@bootlin.com>
 +S:    Maintained
 +F:    drivers/phy/marvell/phy-mvebu-a3700-comphy.c
 +F:    drivers/phy/marvell/phy-mvebu-a3700-utmi.c
 +F:    Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
 +F:    Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
 +
  MARVELL CRYPTO DRIVER
  M:    Boris Brezillon <bbrezillon@kernel.org>
  M:    Arnaud Ebalard <arno@natisbad.org>
@@@ -9531,17 -9354,6 +9528,17 @@@ T:    git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/platform/imx-pxp.[ch]
  
 +MEDIA DRIVERS FOR FREESCALE IMX7
 +M:    Rui Miguel Silva <rmfrfs@gmail.com>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/media/imx7-csi.txt
 +F:    Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt
 +F:    Documentation/media/v4l-drivers/imx7.rst
 +F:    drivers/staging/media/imx/imx7-media-csi.c
 +F:    drivers/staging/media/imx/imx7-mipi-csis.c
 +
  MEDIA DRIVERS FOR HELENE
  M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
@@@ -9903,7 -9715,6 +9900,7 @@@ M:      Vadim Pasternak <vadimp@mellanox.com
  L:    platform-driver-x86@vger.kernel.org
  S:    Supported
  F:    drivers/platform/mellanox/
 +F:    include/linux/platform_data/mlxreg.h
  
  MELLANOX MLX4 core VPI driver
  M:    Tariq Toukan <tariqt@mellanox.com>
@@@ -9978,14 -9789,6 +9975,14 @@@ F:    kernel/sched/membarrier.
  F:    include/uapi/linux/membarrier.h
  F:    arch/powerpc/include/asm/membarrier.h
  
 +MEMBLOCK
 +M:    Mike Rapoport <rppt@linux.ibm.com>
 +L:    linux-mm@kvack.org
 +S:    Maintained
 +F:    include/linux/memblock.h
 +F:    mm/memblock.c
 +F:    Documentation/core-api/boot-time-mm.rst
 +
  MEMORY MANAGEMENT
  L:    linux-mm@kvack.org
  W:    http://www.linux-mm.org
@@@ -10052,18 -9855,6 +10049,18 @@@ F:  drivers/media/platform/meson/ao-cec.
  F:    Documentation/devicetree/bindings/media/meson-ao-cec.txt
  T:    git git://linuxtv.org/media_tree.git
  
 +MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
 +M:    Liang Yang <liang.yang@amlogic.com>
 +L:    linux-mtd@lists.infradead.org
 +S:    Maintained
 +F:    drivers/mtd/nand/raw/meson_*
 +F:    Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
 +
 +METHODE UDPU SUPPORT
 +M:    Vladimir Vid <vladimir.vid@sartura.hr>
 +S:    Maintained
 +F:    arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
 +
  MICROBLAZE ARCHITECTURE
  M:    Michal Simek <monstr@monstr.eu>
  W:    http://www.monstr.eu/fdt/
@@@ -10788,7 -10579,6 +10785,7 @@@ F:   Documentation/devicetree/bindings/ne
  F:    net/dsa/
  F:    include/net/dsa.h
  F:    include/linux/dsa/
 +F:    include/linux/platform_data/dsa.h
  F:    drivers/net/dsa/
  
  NETWORKING [GENERAL]
@@@ -11004,12 -10794,6 +11001,12 @@@ F: drivers/power/supply/bq27xxx_battery
  F:    drivers/power/supply/isp1704_charger.c
  F:    drivers/power/supply/rx51_battery.c
  
 +NOLIBC HEADER FILE
 +M:    Willy Tarreau <w@1wt.eu>
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
 +F:    tools/include/nolibc/
 +
  NTB AMD DRIVER
  M:    Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
  L:    linux-ntb@googlegroups.com
@@@ -11111,7 -10895,7 +11108,7 @@@ F:   include/linux/nvmem-consumer.
  F:    include/linux/nvmem-provider.h
  
  NXP SGTL5000 DRIVER
 -M:    Fabio Estevam <fabio.estevam@nxp.com>
 +M:    Fabio Estevam <festevam@gmail.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/sound/sgtl5000.txt
@@@ -11149,14 -10933,6 +11146,14 @@@ F: lib/objagg.
  F:    lib/test_objagg.c
  F:    include/linux/objagg.h
  
 +NXP FSPI DRIVER
 +R:    Yogesh Gaur <yogeshgaur.83@gmail.com>
 +M:    Ashish Kumar <ashish.kumar@nxp.com>
 +L:    linux-spi@vger.kernel.org
 +S:    Maintained
 +F:    drivers/spi/spi-nxp-fspi.c
 +F:    Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
 +
  OBJTOOL
  M:    Josh Poimboeuf <jpoimboe@redhat.com>
  M:    Peter Zijlstra <peterz@infradead.org>
@@@ -11458,19 -11234,6 +11455,19 @@@ S: Maintaine
  F:    drivers/media/i2c/ov7740.c
  F:    Documentation/devicetree/bindings/media/i2c/ov7740.txt
  
 +OMNIVISION OV9640 SENSOR DRIVER
 +M:    Petr Cvek <petrcvekcz@gmail.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/i2c/ov9640.*
 +
 +OMNIVISION OV8856 SENSOR DRIVER
 +M:    Ben Kao <ben.kao@intel.com>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/i2c/ov8856.c
 +
  OMNIVISION OV9650 SENSOR DRIVER
  M:    Sakari Ailus <sakari.ailus@linux.intel.com>
  R:    Akinobu Mita <akinobu.mita@gmail.com>
@@@ -11503,11 -11266,6 +11500,11 @@@ M: Jens Wiklander <jens.wiklander@linar
  S:    Maintained
  F:    drivers/tee/optee/
  
 +OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
 +M:    Sumit Garg <sumit.garg@linaro.org>
 +S:    Maintained
 +F:    drivers/char/hw_random/optee-rng.c
 +
  OPA-VNIC DRIVER
  M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
  M:    Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
@@@ -11635,6 -11393,13 +11632,6 @@@ W:  http://www.nongnu.org/orinoco
  S:    Orphan
  F:    drivers/net/wireless/intersil/orinoco/
  
 -OSD LIBRARY and FILESYSTEM
 -M:    Boaz Harrosh <ooo@electrozaur.com>
 -S:    Maintained
 -F:    drivers/scsi/osd/
 -F:    include/scsi/osd_*
 -F:    fs/exofs/
 -
  OV2659 OMNIVISION SENSOR DRIVER
  M:    "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -11720,7 -11485,7 +11717,7 @@@ F:   Documentation/blockdev/paride.tx
  F:    drivers/block/paride/
  
  PARISC ARCHITECTURE
 -M:    "James E.J. Bottomley" <jejb@parisc-linux.org>
 +M:    "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
  M:    Helge Deller <deller@gmx.de>
  L:    linux-parisc@vger.kernel.org
  W:    http://www.parisc-linux.org/
@@@ -11747,11 -11512,6 +11744,11 @@@ F: lib/parman.
  F:    lib/test_parman.c
  F:    include/linux/parman.h
  
 +PC ENGINES APU BOARD DRIVER
 +M:    Enrico Weigelt, metux IT consult <info@metux.net>
 +S:    Maintained
 +F:    drivers/platform/x86/pcengines-apuv2.c
 +
  PC87360 HARDWARE MONITORING DRIVER
  M:    Jim Cromie <jim.cromie@gmail.com>
  L:    linux-hwmon@vger.kernel.org
@@@ -11805,7 -11565,7 +11802,7 @@@ F:   Documentation/devicetree/bindings/pc
  F:    drivers/pci/controller/pcie-altera.c
  
  PCI DRIVER FOR APPLIEDMICRO XGENE
 -M:    Tanmay Inamdar <tinamdar@apm.com>
 +M:    Toan Le <toan@os.amperecomputing.com>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
@@@ -11829,7 -11589,7 +11826,7 @@@ F:   Documentation/devicetree/bindings/pc
  F:    drivers/pci/controller/dwc/pcie-armada8k.c
  
  PCI DRIVER FOR CADENCE PCIE IP
 -M:    Alan Douglas <adouglas@cadence.com>
 +M:    Tom Joseph <tjoseph@cadence.com>
  L:    linux-pci@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/cdns,*.txt
@@@ -11983,7 -11743,7 +11980,7 @@@ F:   Documentation/devicetree/bindings/pc
  F:    drivers/pci/controller/pcie-altera-msi.c
  
  PCI MSI DRIVER FOR APPLIEDMICRO XGENE
 -M:    Duc Dang <dhdang@apm.com>
 +M:    Toan Le <toan@os.amperecomputing.com>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
@@@ -12472,6 -12232,14 +12469,6 @@@ S:  Maintaine
  F:    drivers/net/ppp/pptp.c
  W:    http://sourceforge.net/projects/accel-pptp
  
 -PREEMPTIBLE KERNEL
 -M:    Robert Love <rml@tech9.net>
 -L:    kpreempt-tech@lists.sourceforge.net
 -W:    https://www.kernel.org/pub/linux/kernel/people/rml/preempt-kernel
 -S:    Supported
 -F:    Documentation/preempt-locking.txt
 -F:    include/linux/preempt.h
 -
  PRINTK
  M:    Petr Mladek <pmladek@suse.com>
  M:    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
@@@ -12608,7 -12376,6 +12605,7 @@@ L:   linux-media@vger.kernel.or
  T:    git git://linuxtv.org/media_tree.git
  S:    Odd Fixes
  F:    drivers/media/usb/pwc/*
 +F:    include/trace/events/pwc.h
  
  PWM FAN DRIVER
  M:    Kamil Debski <kamil@wypas.org>
@@@ -12834,11 -12601,11 +12831,11 @@@ F:        Documentation/media/v4l-drivers/qcom
  F:    drivers/media/platform/qcom/camss/
  
  QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
 -M:  Ilia Lin <ilia.lin@gmail.com>
 -L:  linux-pm@vger.kernel.org
 -S:  Maintained
 -F:  Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 -F:  drivers/cpufreq/qcom-cpufreq-kryo.c
 +M:    Ilia Lin <ilia.lin@kernel.org>
 +L:    linux-pm@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 +F:    drivers/cpufreq/qcom-cpufreq-kryo.c
  
  QUALCOMM EMAC GIGABIT ETHERNET DRIVER
  M:    Timur Tabi <timur@kernel.org>
@@@ -12846,14 -12613,6 +12843,14 @@@ L: netdev@vger.kernel.or
  S:    Maintained
  F:    drivers/net/ethernet/qualcomm/emac/
  
 +QUALCOMM ETHQOS ETHERNET DRIVER
 +M:    Vinod Koul <vkoul@kernel.org>
 +M:    Niklas Cassel <niklas.cassel@linaro.org>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 +F:    Documentation/devicetree/bindings/net/qcom,dwmac.txt
 +
  QUALCOMM GENERIC INTERFACE I2C DRIVER
  M:    Alok Chauhan <alokc@codeaurora.org>
  M:    Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
@@@ -13013,16 -12772,6 +13010,16 @@@ M: Alexandre Bounine <alex.bou9@gmail.c
  S:    Maintained
  F:    drivers/rapidio/
  
 +RAS INFRASTRUCTURE
 +M:    Tony Luck <tony.luck@intel.com>
 +M:    Borislav Petkov <bp@alien8.de>
 +L:    linux-edac@vger.kernel.org
 +S:    Maintained
 +F:    drivers/ras/
 +F:    include/linux/ras.h
 +F:    include/ras/ras_event.h
 +F:    Documentation/admin-guide/ras.rst
 +
  RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER
  L:    linux-wireless@vger.kernel.org
  S:    Orphan
@@@ -13220,7 -12969,6 +13217,7 @@@ F:   drivers/reset
  F:    Documentation/devicetree/bindings/reset/
  F:    include/dt-bindings/reset/
  F:    include/linux/reset.h
 +F:    include/linux/reset/
  F:    include/linux/reset-controller.h
  
  RESTARTABLE SEQUENCES SUPPORT
@@@ -13721,7 -13469,6 +13718,7 @@@ F:   kernel/sched
  F:    include/linux/sched.h
  F:    include/uapi/linux/sched.h
  F:    include/linux/wait.h
 +F:    include/linux/preempt.h
  
  SCR24X CHIP CARD INTERFACE DRIVER
  M:    Lubomir Rintel <lkundrak@v3.sk>
@@@ -13765,7 -13512,6 +13762,7 @@@ M:   "James E.J. Bottomley" <jejb@linux.i
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
  M:    "Martin K. Petersen" <martin.petersen@oracle.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
 +Q:    https://patchwork.kernel.org/project/linux-scsi/list/
  L:    linux-scsi@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/scsi/
@@@ -13780,18 -13526,6 +13777,18 @@@ F: Documentation/scsi/st.tx
  F:    drivers/scsi/st.*
  F:    drivers/scsi/st_*.h
  
 +SCSI TARGET SUBSYSTEM
 +M:    "Martin K. Petersen" <martin.petersen@oracle.com>
 +L:    linux-scsi@vger.kernel.org
 +L:    target-devel@vger.kernel.org
 +W:    http://www.linux-iscsi.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
 +Q:    https://patchwork.kernel.org/project/target-devel/list/
 +S:    Supported
 +F:    drivers/target/
 +F:    include/target/
 +F:    Documentation/target/
 +
  SCTP PROTOCOL
  M:    Vlad Yasevich <vyasevich@gmail.com>
  M:    Neil Horman <nhorman@tuxdriver.com>
@@@ -13863,18 -13597,11 +13860,18 @@@ F:        drivers/mmc/host/sdhci-brcmstb
  SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
  M:    Adrian Hunter <adrian.hunter@intel.com>
  L:    linux-mmc@vger.kernel.org
 -T:    git git://git.infradead.org/users/ahunter/linux-sdhci.git
  S:    Maintained
  F:    drivers/mmc/host/sdhci*
  F:    include/linux/mmc/sdhci*
  
 +EMMC CMDQ HOST CONTROLLER INTERFACE (CQHCI) DRIVER
 +M:    Adrian Hunter <adrian.hunter@intel.com>
 +M:    Ritesh Harjani <riteshh@codeaurora.org>
 +M:    Asutosh Das <asutoshd@codeaurora.org>
 +L:    linux-mmc@vger.kernel.org
 +S:    Maintained
 +F:    drivers/mmc/host/cqhci*
 +
  SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER
  M:    Prabu Thangamuthu <prabu.t@synopsys.com>
  M:    Manjunath M B <manjumb@synopsys.com>
@@@ -14010,7 -13737,6 +14007,7 @@@ F:   drivers/misc/sgi-xp
  
  SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
  M:    Ursula Braun <ubraun@linux.ibm.com>
 +M:    Karsten Graul <kgraul@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  S:    Supported
@@@ -14602,7 -14328,6 +14599,7 @@@ F:   arch/arm/mach-spear
  
  SPI NOR SUBSYSTEM
  M:    Marek Vasut <marek.vasut@gmail.com>
 +M:    Tudor Ambarus <tudor.ambarus@microchip.com>
  L:    linux-mtd@lists.infradead.org
  W:    http://www.linux-mtd.infradead.org/
  Q:    http://patchwork.ozlabs.org/project/linux-mtd/list/
@@@ -14767,6 -14492,11 +14764,6 @@@ L:  linux-wireless@vger.kernel.or
  S:    Supported
  F:    drivers/staging/wilc1000/
  
 -STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER
 -M:    Arnaud Patard <arnaud.patard@rtp-net.org>
 -S:    Odd Fixes
 -F:    drivers/staging/xgifb/
 -
  STAGING SUBSYSTEM
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
@@@ -14971,7 -14701,7 +14968,7 @@@ S:   Maintaine
  F:    drivers/tty/serial/8250/8250_dw.c
  
  SYNOPSYS DESIGNWARE APB GPIO DRIVER
 -M:    Hoan Tran <hotran@apm.com>
 +M:    Hoan Tran <hoan@os.amperecomputing.com>
  L:    linux-gpio@vger.kernel.org
  S:    Maintained
  F:    drivers/gpio/gpio-dwapb.c
@@@ -15063,6 -14793,18 +15060,6 @@@ F:  Documentation/filesystems/sysv-fs.tx
  F:    fs/sysv/
  F:    include/linux/sysv_fs.h
  
 -TARGET SUBSYSTEM
 -M:    "Nicholas A. Bellinger" <nab@linux-iscsi.org>
 -L:    linux-scsi@vger.kernel.org
 -L:    target-devel@vger.kernel.org
 -W:    http://www.linux-iscsi.org
 -W:    http://groups.google.com/group/linux-iscsi-target-dev
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 -S:    Supported
 -F:    drivers/target/
 -F:    include/target/
 -F:    Documentation/target/
 -
  TASKSTATS STATISTICS INTERFACE
  M:    Balbir Singh <bsingharora@gmail.com>
  S:    Maintained
@@@ -15349,13 -15091,6 +15346,13 @@@ L: alsa-devel@alsa-project.org (moderat
  S:    Maintained
  F:    sound/soc/ti/
  
 +Texas Instruments' DAC7612 DAC Driver
 +M:    Ricardo Ribalda <ricardo@ribalda.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Supported
 +F:    drivers/iio/dac/ti-dac7612.c
 +F:    Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
 +
  THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
  M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
@@@ -15667,11 -15402,12 +15664,11 @@@ F:        mm/shmem.
  TOMOYO SECURITY MODULE
  M:    Kentaro Takeda <takedakn@nttdata.co.jp>
  M:    Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
 -L:    tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English)
 -L:    tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
 -L:    tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
 -L:    tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
 -W:    http://tomoyo.sourceforge.jp/
 -T:    quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/
 +L:    tomoyo-dev-en@lists.osdn.me (subscribers-only, for developers in English)
 +L:    tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
 +L:    tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
 +L:    tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
 +W:    https://tomoyo.osdn.jp/
  S:    Maintained
  F:    security/tomoyo/
  
@@@ -15960,16 -15696,14 +15957,16 @@@ F:        drivers/visorbus
  F:    drivers/staging/unisys/
  
  UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
 -M:    Vinayak Holikatti <vinholikatti@gmail.com>
 +R:    Alim Akhtar <alim.akhtar@samsung.com>
 +R:    Avri Altman <avri.altman@wdc.com>
 +R:    Pedro Sousa <pedrom.sousa@synopsys.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/ufs.txt
  F:    drivers/scsi/ufs/
  
  UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
 -M:    Joao Pinto <jpinto@synopsys.com>
 +M:    Pedro Sousa <pedrom.sousa@synopsys.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
@@@ -16754,12 -16488,6 +16751,12 @@@ L: linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-wcove.c
  
 +WHWAVE RTC DRIVER
 +M:    Dianlong Li <long17.cool@163.com>
 +L:    linux-rtc@vger.kernel.org
 +S:    Maintained
 +F:    drivers/rtc/rtc-sd3078.c
 +
  WIIMOTE HID DRIVER
  M:    David Herrmann <dh.herrmann@googlemail.com>
  L:    linux-input@vger.kernel.org
@@@ -16791,11 -16519,6 +16788,11 @@@ M: David Härdeman <david@hardeman.nu
  S:    Maintained
  F:    drivers/media/rc/winbond-cir.c
  
 +RCMM REMOTE CONTROLS DECODER
 +M:    Patrick Lerda <patrick9876@free.fr>
 +S:    Maintained
 +F:    drivers/media/rc/ir-rcmm-decoder.c
 +
  WINSYSTEMS EBC-C384 WATCHDOG DRIVER
  M:    William Breathitt Gray <vilhelm.gray@gmail.com>
  L:    linux-watchdog@vger.kernel.org
@@@ -16988,7 -16711,6 +16985,7 @@@ M:   Jesper Dangaard Brouer <hawk@kernel.
  M:    John Fastabend <john.fastabend@gmail.com>
  L:    netdev@vger.kernel.org
  L:    xdp-newbies@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Supported
  F:    net/core/xdp.c
  F:    include/net/xdp.h
@@@ -17002,7 -16724,6 +16999,7 @@@ XDP SOCKETS (AF_XDP
  M:    Björn Töpel <bjorn.topel@intel.com>
  M:    Magnus Karlsson <magnus.karlsson@intel.com>
  L:    netdev@vger.kernel.org
 +L:    bpf@vger.kernel.org
  S:    Maintained
  F:    kernel/bpf/xskmap.c
  F:    net/xdp/
@@@ -34,7 -34,6 +34,7 @@@
  #define ICC_SRE                               __ACCESS_CP15(c12, 0, c12, 5)
  #define ICC_IGRPEN1                   __ACCESS_CP15(c12, 0, c12, 7)
  #define ICC_BPR1                      __ACCESS_CP15(c12, 0, c12, 3)
 +#define ICC_RPR                               __ACCESS_CP15(c12, 0, c11, 3)
  
  #define __ICC_AP0Rx(x)                        __ACCESS_CP15(c12, 0, c8, 4 | x)
  #define ICC_AP0R0                     __ICC_AP0Rx(0)
@@@ -55,7 -54,7 +55,7 @@@
  #define ICH_VTR                               __ACCESS_CP15(c12, 4, c11, 1)
  #define ICH_MISR                      __ACCESS_CP15(c12, 4, c11, 2)
  #define ICH_EISR                      __ACCESS_CP15(c12, 4, c11, 3)
- #define ICH_ELSR                      __ACCESS_CP15(c12, 4, c11, 5)
+ #define ICH_ELRSR                     __ACCESS_CP15(c12, 4, c11, 5)
  #define ICH_VMCR                      __ACCESS_CP15(c12, 4, c11, 7)
  
  #define __LR0(x)                      __ACCESS_CP15(c12, 4, c12, x)
@@@ -152,7 -151,7 +152,7 @@@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2
  CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
  CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
  CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
- CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+ CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
  CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
  CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
  CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
@@@ -246,21 -245,6 +246,21 @@@ static inline void gic_write_bpr1(u32 v
        write_sysreg(val, ICC_BPR1);
  }
  
 +static inline u32 gic_read_pmr(void)
 +{
 +      return read_sysreg(ICC_PMR);
 +}
 +
 +static inline void gic_write_pmr(u32 val)
 +{
 +      write_sysreg(val, ICC_PMR);
 +}
 +
 +static inline u32 gic_read_rpr(void)
 +{
 +      return read_sysreg(ICC_RPR);
 +}
 +
  /*
   * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
   * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
@@@ -363,22 -347,5 +363,22 @@@ static inline void gits_write_vpendbase
  
  #define gits_read_vpendbaser(c)               __gic_readq_nonatomic(c)
  
 +static inline bool gic_prio_masking_enabled(void)
 +{
 +      return false;
 +}
 +
 +static inline void gic_pmr_mask_irqs(void)
 +{
 +      /* Should not get called. */
 +      WARN_ON_ONCE(true);
 +}
 +
 +static inline void gic_arch_enable_irqs(void)
 +{
 +      /* Should not get called. */
 +      WARN_ON_ONCE(true);
 +}
 +
  #endif /* !__ASSEMBLY__ */
  #endif /* !__ASM_ARCH_GICV3_H */
@@@ -26,6 -26,7 +26,7 @@@
  #include <asm/kvm_asm.h>
  #include <asm/kvm_mmio.h>
  #include <asm/fpstate.h>
+ #include <asm/smp_plat.h>
  #include <kvm/arm_arch_timer.h>
  
  #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@@ -48,7 -49,6 +49,7 @@@
  #define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  #define KVM_REQ_IRQ_PENDING   KVM_ARCH_REQ(1)
 +#define KVM_REQ_VCPU_RESET    KVM_ARCH_REQ(2)
  
  DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  
@@@ -57,10 -57,13 +58,13 @@@ int __attribute_const__ kvm_target_cpu(
  int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
  void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
  
- struct kvm_arch {
-       /* VTTBR value associated with below pgd and vmid */
-       u64    vttbr;
+ struct kvm_vmid {
+       /* The VMID generation used for the virt. memory system */
+       u64    vmid_gen;
+       u32    vmid;
+ };
  
+ struct kvm_arch {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
  
         */
  
        /* The VMID generation used for the virt. memory system */
-       u64    vmid_gen;
-       u32    vmid;
+       struct kvm_vmid vmid;
  
        /* Stage-2 page table */
        pgd_t *pgd;
+       phys_addr_t pgd_phys;
  
        /* Interrupt controller */
        struct vgic_dist        vgic;
@@@ -148,13 -151,13 +152,20 @@@ struct kvm_cpu_context 
  
  typedef struct kvm_cpu_context kvm_cpu_context_t;
  
+ static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+                                            int cpu)
+ {
+       /* The host's MPIDR is immutable, so let's set it up at boot time */
+       cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
+ }
 +struct vcpu_reset_state {
 +      unsigned long   pc;
 +      unsigned long   r0;
 +      bool            be;
 +      bool            reset;
 +};
 +
  struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
  
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
  
 +      struct vcpu_reset_state reset_state;
 +
        /* Detect first run of a vcpu */
        bool has_run_once;
  };
@@@ -224,7 -225,35 +235,35 @@@ unsigned long kvm_arm_num_regs(struct k
  int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
  int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
  int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
- unsigned long kvm_call_hyp(void *hypfn, ...);
+ unsigned long __kvm_call_hyp(void *hypfn, ...);
+ /*
+  * The has_vhe() part doesn't get emitted, but is used for type-checking.
+  */
+ #define kvm_call_hyp(f, ...)                                          \
+       do {                                                            \
+               if (has_vhe()) {                                        \
+                       f(__VA_ARGS__);                                 \
+               } else {                                                \
+                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+               }                                                       \
+       } while(0)
+ #define kvm_call_hyp_ret(f, ...)                                      \
+       ({                                                              \
+               typeof(f(__VA_ARGS__)) ret;                             \
+                                                                       \
+               if (has_vhe()) {                                        \
+                       ret = f(__VA_ARGS__);                           \
+               } else {                                                \
+                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
+                                            ##__VA_ARGS__);            \
+               }                                                       \
+                                                                       \
+               ret;                                                    \
+       })
  void force_vm_exit(const cpumask_t *mask);
  int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
@@@ -275,7 -304,7 +314,7 @@@ static inline void __cpu_init_hyp_mode(
         * compliant with the PCS!).
         */
  
-       kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+       __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
  }
  
  static inline void __cpu_init_stage2(void)
diff --combined arch/arm/kvm/coproc.c
@@@ -293,15 -293,16 +293,16 @@@ static bool access_cntp_tval(struct kvm
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
  {
-       u64 now = kvm_phys_timer_read();
-       u64 val;
+       u32 val;
  
        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_TVAL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
-               *vcpu_reg(vcpu, p->Rt1) = val - now;
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_TVAL);
+               *vcpu_reg(vcpu, p->Rt1) = val;
        }
  
        return true;
@@@ -315,9 -316,11 +316,11 @@@ static bool access_cntp_ctl(struct kvm_
  
        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_CTL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_CTL);
                *vcpu_reg(vcpu, p->Rt1) = val;
        }
  
@@@ -333,9 -336,11 +336,11 @@@ static bool access_cntp_cval(struct kvm
        if (p->is_write) {
                val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
                val |= *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_CVAL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_CVAL);
                *vcpu_reg(vcpu, p->Rt1) = val;
                *vcpu_reg(vcpu, p->Rt2) = val >> 32;
        }
@@@ -1450,6 -1455,6 +1455,6 @@@ void kvm_reset_coprocs(struct kvm_vcpu 
        reset_coproc_regs(vcpu, table, num);
  
        for (num = 1; num < NR_CP15_REGS; num++)
 -              if (vcpu_cp15(vcpu, num) == 0x42424242)
 -                      panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
 +              WARN(vcpu_cp15(vcpu, num) == 0x42424242,
 +                   "Didn't reset vcpu_cp15(vcpu, %zi)", num);
  }
  
  #include <linux/types.h>
  #include <linux/kvm_types.h>
 +#include <asm/arch_gicv3.h>
  #include <asm/cpufeature.h>
  #include <asm/daifflags.h>
  #include <asm/fpsimd.h>
  #include <asm/kvm.h>
  #include <asm/kvm_asm.h>
  #include <asm/kvm_mmio.h>
+ #include <asm/smp_plat.h>
  #include <asm/thread_info.h>
  
  #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@@ -49,7 -49,6 +50,7 @@@
  #define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  #define KVM_REQ_IRQ_PENDING   KVM_ARCH_REQ(1)
 +#define KVM_REQ_VCPU_RESET    KVM_ARCH_REQ(2)
  
  DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  
@@@ -58,16 -57,19 +59,19 @@@ int kvm_reset_vcpu(struct kvm_vcpu *vcp
  int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
  void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
  
- struct kvm_arch {
+ struct kvm_vmid {
        /* The VMID generation used for the virt. memory system */
        u64    vmid_gen;
        u32    vmid;
+ };
+ struct kvm_arch {
+       struct kvm_vmid vmid;
  
        /* stage2 entry level table */
        pgd_t *pgd;
+       phys_addr_t pgd_phys;
  
-       /* VTTBR value associated with above pgd and vmid */
-       u64    vttbr;
        /* VTCR_EL2 value for this VM */
        u64    vtcr;
  
@@@ -210,13 -212,6 +214,13 @@@ struct kvm_cpu_context 
  
  typedef struct kvm_cpu_context kvm_cpu_context_t;
  
 +struct vcpu_reset_state {
 +      unsigned long   pc;
 +      unsigned long   r0;
 +      bool            be;
 +      bool            reset;
 +};
 +
  struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
  
        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;
  
 +      /* Additional reset state */
 +      struct vcpu_reset_state reset_state;
 +
        /* True when deferrable sysregs are loaded on the physical CPU,
         * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
        bool sysregs_loaded_on_cpu;
@@@ -382,7 -374,36 +386,36 @@@ void kvm_arm_halt_guest(struct kvm *kvm
  void kvm_arm_resume_guest(struct kvm *kvm);
  
  u64 __kvm_call_hyp(void *hypfn, ...);
- #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+ /*
+  * The couple of isb() below are there to guarantee the same behaviour
+  * on VHE as on !VHE, where the eret to EL1 acts as a context
+  * synchronization event.
+  */
+ #define kvm_call_hyp(f, ...)                                          \
+       do {                                                            \
+               if (has_vhe()) {                                        \
+                       f(__VA_ARGS__);                                 \
+                       isb();                                          \
+               } else {                                                \
+                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+               }                                                       \
+       } while(0)
+ #define kvm_call_hyp_ret(f, ...)                                      \
+       ({                                                              \
+               typeof(f(__VA_ARGS__)) ret;                             \
+                                                                       \
+               if (has_vhe()) {                                        \
+                       ret = f(__VA_ARGS__);                           \
+                       isb();                                          \
+               } else {                                                \
+                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
+                                            ##__VA_ARGS__);            \
+               }                                                       \
+                                                                       \
+               ret;                                                    \
+       })
  
  void force_vm_exit(const cpumask_t *mask);
  void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@@ -401,6 -422,13 +434,13 @@@ struct kvm_vcpu *kvm_mpidr_to_vcpu(stru
  
  DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
  
+ static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+                                            int cpu)
+ {
+       /* The host's MPIDR is immutable, so let's set it up at boot time */
+       cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+ }
  void __kvm_enable_ssbs(void);
  
  static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
@@@ -486,25 -514,10 +526,25 @@@ static inline int kvm_arch_vcpu_run_pid
  static inline void kvm_arm_vhe_guest_enter(void)
  {
        local_daif_mask();
 +
 +      /*
 +       * Having IRQs masked via PMR when entering the guest means the GIC
 +       * will not signal the CPU of interrupts of lower priority, and the
 +       * only way to get out will be via guest exceptions.
 +       * Naturally, we want to avoid this.
 +       */
 +      if (system_uses_irq_prio_masking()) {
 +              gic_write_pmr(GIC_PRIO_IRQON);
 +              dsb(sy);
 +      }
  }
  
  static inline void kvm_arm_vhe_guest_exit(void)
  {
 +      /*
 +       * local_daif_restore() takes care to properly restore PSTATE.DAIF
 +       * and the GIC PMR if the host is using IRQ priorities.
 +       */
        local_daif_restore(DAIF_PROCCTX_NOIRQ);
  
        /*
@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/compiler.h>
  #include <linux/kvm_host.h>
  
 +#include <asm/kprobes.h>
  #include <asm/kvm_asm.h>
  #include <asm/kvm_emulate.h>
  #include <asm/kvm_hyp.h>
@@@ -53,7 -52,6 +53,6 @@@ static void __hyp_text __sysreg_save_us
  
  static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
  {
-       ctxt->sys_regs[MPIDR_EL1]       = read_sysreg(vmpidr_el2);
        ctxt->sys_regs[CSSELR_EL1]      = read_sysreg(csselr_el1);
        ctxt->sys_regs[SCTLR_EL1]       = read_sysreg_el1(sctlr);
        ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
@@@ -99,14 -97,12 +98,14 @@@ void sysreg_save_host_state_vhe(struct 
  {
        __sysreg_save_common_state(ctxt);
  }
 +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
  
  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
  {
        __sysreg_save_common_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
  }
 +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
  
  static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
  {
@@@ -191,14 -187,12 +190,14 @@@ void sysreg_restore_host_state_vhe(stru
  {
        __sysreg_restore_common_state(ctxt);
  }
 +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
  
  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
  {
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
  }
 +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
  
  void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
  {
@@@ -314,29 -314,12 +314,29 @@@ static bool trap_raz_wi(struct kvm_vcp
                return read_zero(vcpu, p);
  }
  
 -static bool trap_undef(struct kvm_vcpu *vcpu,
 -                     struct sys_reg_params *p,
 -                     const struct sys_reg_desc *r)
 +/*
 + * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 + * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 + * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 + * treat it separately.
 + */
 +static bool trap_loregion(struct kvm_vcpu *vcpu,
 +                        struct sys_reg_params *p,
 +                        const struct sys_reg_desc *r)
  {
 -      kvm_inject_undefined(vcpu);
 -      return false;
 +      u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 +      u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
 +                       (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 +
 +      if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
 +              kvm_inject_undefined(vcpu);
 +              return false;
 +      }
 +
 +      if (p->is_write && sr == SYS_LORID_EL1)
 +              return write_to_read_only(vcpu, p, r);
 +
 +      return trap_raz_wi(vcpu, p, r);
  }
  
  static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@@ -982,6 -965,10 +982,10 @@@ static bool access_pmuserenr(struct kvm
        return true;
  }
  
+ #define reg_to_encoding(x)                                            \
+       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
  /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
  #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                    \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
        { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
  
- static bool access_cntp_tval(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
+ static bool access_arch_timer(struct kvm_vcpu *vcpu,
+                             struct sys_reg_params *p,
+                             const struct sys_reg_desc *r)
  {
-       u64 now = kvm_phys_timer_read();
-       u64 cval;
+       enum kvm_arch_timers tmr;
+       enum kvm_arch_timer_regs treg;
+       u64 reg = reg_to_encoding(r);
  
-       if (p->is_write) {
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
-                                     p->regval + now);
-       } else {
-               cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
-               p->regval = cval - now;
+       switch (reg) {
+       case SYS_CNTP_TVAL_EL0:
+       case SYS_AARCH32_CNTP_TVAL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_TVAL;
+               break;
+       case SYS_CNTP_CTL_EL0:
+       case SYS_AARCH32_CNTP_CTL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_CTL;
+               break;
+       case SYS_CNTP_CVAL_EL0:
+       case SYS_AARCH32_CNTP_CVAL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_CVAL;
+               break;
+       default:
+               BUG();
        }
  
-       return true;
- }
- static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
- {
-       if (p->is_write)
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
-       else
-               p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
-       return true;
- }
- static bool access_cntp_cval(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
- {
        if (p->is_write)
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
+               kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
        else
-               p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+               p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
  
        return true;
  }
@@@ -1065,6 -1046,11 +1063,6 @@@ static u64 read_id_reg(struct sys_reg_d
                if (val & ptrauth_mask)
                        kvm_debug("ptrauth unsupported for guests, suppressing\n");
                val &= ~ptrauth_mask;
 -      } else if (id == SYS_ID_AA64MMFR1_EL1) {
 -              if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
 -                      kvm_debug("LORegions unsupported for guests, suppressing\n");
 -
 -              val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
        }
  
        return val;
@@@ -1160,6 -1146,64 +1158,64 @@@ static int set_raz_id_reg(struct kvm_vc
        return __set_id_reg(rd, uaddr, true);
  }
  
+ static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                      const struct sys_reg_desc *r)
+ {
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+       p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       return true;
+ }
+ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+ {
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+       p->regval = read_sysreg(clidr_el1);
+       return true;
+ }
+ static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+ {
+       if (p->is_write)
+               vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+       else
+               p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+       return true;
+ }
+ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+ {
+       u32 csselr;
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+       csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
+       p->regval = get_ccsidr(csselr);
+       /*
+        * Guests should not be doing cache operations by set/way at all, and
+        * for this reason, we trap them and attempt to infer the intent, so
+        * that we can flush the entire guest's address space at the appropriate
+        * time.
+        * To prevent this trapping from causing performance problems, let's
+        * expose the geometry of all data and unified caches (which are
+        * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
+        * [If guests should attempt to infer aliasing properties from the
+        * geometry (which is not permitted by the architecture), they would
+        * only do so for virtually indexed caches.]
+        */
+       if (!(csselr & 1)) // data or unified cache
+               p->regval &= ~GENMASK(27, 3);
+       return true;
+ }
  /* sys_reg_desc initialiser for known cpufeature ID registers */
  #define ID_SANITISED(name) {                  \
        SYS_DESC(SYS_##name),                   \
@@@ -1350,11 -1394,11 +1406,11 @@@ static const struct sys_reg_desc sys_re
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  
 -      { SYS_DESC(SYS_LORSA_EL1), trap_undef },
 -      { SYS_DESC(SYS_LOREA_EL1), trap_undef },
 -      { SYS_DESC(SYS_LORN_EL1), trap_undef },
 -      { SYS_DESC(SYS_LORC_EL1), trap_undef },
 -      { SYS_DESC(SYS_LORID_EL1), trap_undef },
 +      { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
 +      { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
 +      { SYS_DESC(SYS_LORN_EL1), trap_loregion },
 +      { SYS_DESC(SYS_LORC_EL1), trap_loregion },
 +      { SYS_DESC(SYS_LORID_EL1), trap_loregion },
  
        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
  
        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
  
-       { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
+       { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
+       { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+       { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
+       { SYS_DESC(SYS_CTR_EL0), access_ctr },
  
        { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
  
-       { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
-       { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
-       { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
+       { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
+       { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
+       { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
  
        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),
  
        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
-       { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
+       { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
  };
  
  static bool trap_dbgidr(struct kvm_vcpu *vcpu,
@@@ -1677,6 -1724,7 +1736,7 @@@ static const struct sys_reg_desc cp14_6
   * register).
   */
  static const struct sys_reg_desc cp15_regs[] = {
+       { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
  
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
  
-       /* CNTP_TVAL */
-       { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
-       /* CNTP_CTL */
-       { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
+       /* Arch Tmers */
+       { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
+       { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
  
        /* PMEVCNTRn */
        PMU_PMEVCNTR(0),
        PMU_PMEVTYPER(30),
        /* PMCCFILTR */
        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
+       { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
+       { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
+       { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
  };
  
  static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
-       { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
+       { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
  };
  
  /* Target specific emulation tables */
@@@ -1832,30 -1883,19 +1895,19 @@@ static const struct sys_reg_desc *get_t
        }
  }
  
- #define reg_to_match_value(x)                                         \
-       ({                                                              \
-               unsigned long val;                                      \
-               val  = (x)->Op0 << 14;                                  \
-               val |= (x)->Op1 << 11;                                  \
-               val |= (x)->CRn << 7;                                   \
-               val |= (x)->CRm << 3;                                   \
-               val |= (x)->Op2;                                        \
-               val;                                                    \
-        })
  static int match_sys_reg(const void *key, const void *elt)
  {
        const unsigned long pval = (unsigned long)key;
        const struct sys_reg_desc *r = elt;
  
-       return pval - reg_to_match_value(r);
+       return pval - reg_to_encoding(r);
  }
  
  static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                         const struct sys_reg_desc table[],
                                         unsigned int num)
  {
-       unsigned long pval = reg_to_match_value(params);
+       unsigned long pval = reg_to_encoding(params);
  
        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
  }
@@@ -2218,11 -2258,15 +2270,15 @@@ static const struct sys_reg_desc *index
        }
  
  FUNCTION_INVARIANT(midr_el1)
- FUNCTION_INVARIANT(ctr_el0)
  FUNCTION_INVARIANT(revidr_el1)
  FUNCTION_INVARIANT(clidr_el1)
  FUNCTION_INVARIANT(aidr_el1)
  
+ static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
+ {
+       ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ }
  /* ->val is filled in by kvm_sys_reg_table_init() */
  static struct sys_reg_desc invariant_sys_regs[] = {
        { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
@@@ -2608,9 -2652,7 +2664,9 @@@ void kvm_reset_sys_regs(struct kvm_vcp
        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);
  
 -      for (num = 1; num < NR_SYS_REGS; num++)
 -              if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
 -                      panic("Didn't reset __vcpu_sys_reg(%zi)", num);
 +      for (num = 1; num < NR_SYS_REGS; num++) {
 +              if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
 +                       "Didn't reset __vcpu_sys_reg(%zi)\n", num))
 +                      break;
 +      }
  }
@@@ -35,6 -35,7 +35,7 @@@
  #include <asm/msr-index.h>
  #include <asm/asm.h>
  #include <asm/kvm_page_track.h>
+ #include <asm/kvm_vcpu_regs.h>
  #include <asm/hyperv-tlfs.h>
  
  #define KVM_MAX_VCPUS 288
@@@ -137,23 -138,23 +138,23 @@@ static inline gfn_t gfn_to_index(gfn_t 
  #define ASYNC_PF_PER_VCPU 64
  
  enum kvm_reg {
-       VCPU_REGS_RAX = 0,
-       VCPU_REGS_RCX = 1,
-       VCPU_REGS_RDX = 2,
-       VCPU_REGS_RBX = 3,
-       VCPU_REGS_RSP = 4,
-       VCPU_REGS_RBP = 5,
-       VCPU_REGS_RSI = 6,
-       VCPU_REGS_RDI = 7,
+       VCPU_REGS_RAX = __VCPU_REGS_RAX,
+       VCPU_REGS_RCX = __VCPU_REGS_RCX,
+       VCPU_REGS_RDX = __VCPU_REGS_RDX,
+       VCPU_REGS_RBX = __VCPU_REGS_RBX,
+       VCPU_REGS_RSP = __VCPU_REGS_RSP,
+       VCPU_REGS_RBP = __VCPU_REGS_RBP,
+       VCPU_REGS_RSI = __VCPU_REGS_RSI,
+       VCPU_REGS_RDI = __VCPU_REGS_RDI,
  #ifdef CONFIG_X86_64
-       VCPU_REGS_R8 8,
-       VCPU_REGS_R9 9,
-       VCPU_REGS_R10 = 10,
-       VCPU_REGS_R11 = 11,
-       VCPU_REGS_R12 = 12,
-       VCPU_REGS_R13 = 13,
-       VCPU_REGS_R14 = 14,
-       VCPU_REGS_R15 = 15,
+       VCPU_REGS_R8  = __VCPU_REGS_R8,
+       VCPU_REGS_R9  = __VCPU_REGS_R9,
+       VCPU_REGS_R10 = __VCPU_REGS_R10,
+       VCPU_REGS_R11 = __VCPU_REGS_R11,
+       VCPU_REGS_R12 = __VCPU_REGS_R12,
+       VCPU_REGS_R13 = __VCPU_REGS_R13,
+       VCPU_REGS_R14 = __VCPU_REGS_R14,
+       VCPU_REGS_R15 = __VCPU_REGS_R15,
  #endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
@@@ -299,7 -300,6 +300,7 @@@ union kvm_mmu_extended_role 
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_la57:1;
 +              unsigned int maxphyaddr:6;
        };
  };
  
@@@ -319,6 -319,7 +320,7 @@@ struct kvm_mmu_page 
        struct list_head link;
        struct hlist_node hash_link;
        bool unsync;
+       bool mmio_cached;
  
        /*
         * The following two entries are used to key the shadow page in the
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
-       /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
-       unsigned long mmu_valid_gen;
        DECLARE_BITMAP(unsync_child_bitmap, 512);
  
  #ifdef CONFIG_X86_32
@@@ -398,7 -395,6 +396,7 @@@ struct kvm_mmu 
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
 +      gpa_t root_cr3;
        union kvm_mmu_role mmu_role;
        u8 root_level;
        u8 shadow_root_level;
@@@ -848,13 -844,11 +846,11 @@@ struct kvm_arch 
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
-       unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
-       struct list_head zapped_obsolete_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
  
@@@ -1255,7 -1249,7 +1251,7 @@@ void kvm_mmu_clear_dirty_pt_masked(stru
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
  void kvm_mmu_zap_all(struct kvm *kvm);
- void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
+ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
  unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
  void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
  
diff --combined arch/x86/kvm/cpuid.c
@@@ -335,7 -335,6 +335,7 @@@ static inline int __do_cpuid_ent(struc
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
        unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
        unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
 +      unsigned f_la57 = 0;
  
        /* cpuid 1.edx */
        const u32 kvm_cpuid_1_edx_x86_features =
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
-               F(CLDEMOTE);
+               F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);
  
        /* cpuid 7.0.edx*/
        const u32 kvm_cpuid_7_0_edx_x86_features =
                        // TSC_ADJUST is emulated
                        entry->ebx |= F(TSC_ADJUST);
                        entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
 +                      f_la57 = entry->ecx & F(LA57);
                        cpuid_mask(&entry->ecx, CPUID_7_ECX);
 +                      /* Set LA57 based on hardware capability. */
 +                      entry->ecx |= f_la57;
                        entry->ecx |= f_umip;
                        /* PKU is not yet implemented for shadow paging. */
                        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --combined arch/x86/kvm/mmu.c
@@@ -109,9 -109,11 +109,11 @@@ module_param(dbg, bool, 0644)
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
  
  
- #define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
- #define PT64_DIR_BASE_ADDR_MASK \
-       (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
+ #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+ #define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
+ #else
+ #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+ #endif
  #define PT64_LVL_ADDR_MASK(level) \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))
@@@ -330,53 -332,56 +332,56 @@@ static inline bool is_access_track_spte
  }
  
  /*
-  * the low bit of the generation number is always presumed to be zero.
-  * This disables mmio caching during memslot updates.  The concept is
-  * similar to a seqcount but instead of retrying the access we just punt
-  * and ignore the cache.
+  * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
+  * the memslots generation and is derived as follows:
   *
-  * spte bits 3-11 are used as bits 1-9 of the generation number,
-  * the bits 52-61 are used as bits 10-19 of the generation number.
+  * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
+  * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
+  *
+  * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
+  * the MMIO generation number, as doing so would require stealing a bit from
+  * the "real" generation number and thus effectively halve the maximum number
+  * of MMIO generations that can be handled before encountering a wrap (which
+  * requires a full MMU zap).  The flag is instead explicitly queried when
+  * checking for MMIO spte cache hits.
   */
- #define MMIO_SPTE_GEN_LOW_SHIFT               2
- #define MMIO_SPTE_GEN_HIGH_SHIFT      52
+ #define MMIO_SPTE_GEN_MASK            GENMASK_ULL(18, 0)
  
- #define MMIO_GEN_SHIFT                        20
- #define MMIO_GEN_LOW_SHIFT            10
- #define MMIO_GEN_LOW_MASK             ((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK                 ((1 << MMIO_GEN_SHIFT) - 1)
+ #define MMIO_SPTE_GEN_LOW_START               3
+ #define MMIO_SPTE_GEN_LOW_END         11
+ #define MMIO_SPTE_GEN_LOW_MASK                GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
                                                  MMIO_SPTE_GEN_LOW_START)
  
- static u64 generation_mmio_spte_mask(unsigned int gen)
+ #define MMIO_SPTE_GEN_HIGH_START      52
+ #define MMIO_SPTE_GEN_HIGH_END                61
+ #define MMIO_SPTE_GEN_HIGH_MASK               GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
+                                                   MMIO_SPTE_GEN_HIGH_START)
+ static u64 generation_mmio_spte_mask(u64 gen)
  {
        u64 mask;
  
-       WARN_ON(gen & ~MMIO_GEN_MASK);
+       WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
  
-       mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
-       mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
+       mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
+       mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
  }
  
- static unsigned int get_mmio_spte_generation(u64 spte)
+ static u64 get_mmio_spte_generation(u64 spte)
  {
-       unsigned int gen;
+       u64 gen;
  
        spte &= ~shadow_mmio_mask;
  
-       gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
-       gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
+       gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
+       gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
        return gen;
  }
  
- static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
- {
-       return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
- }
  static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned access)
  {
-       unsigned int gen = kvm_current_mmio_generation(vcpu);
+       u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 mask = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
  
        mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << shadow_nonpresent_or_rsvd_mask_len;
  
+       page_header(__pa(sptep))->mmio_cached = true;
        trace_mark_mmio_spte(sptep, gfn, access, gen);
        mmu_spte_set(sptep, mask);
  }
@@@ -407,7 -414,7 +414,7 @@@ static gfn_t get_mmio_spte_gfn(u64 spte
  
  static unsigned get_mmio_spte_access(u64 spte)
  {
-       u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
+       u64 mask = generation_mmio_spte_mask(MMIO_SPTE_GEN_MASK) | shadow_mmio_mask;
        return (spte & ~mask) & ~PAGE_MASK;
  }
  
@@@ -424,9 -431,13 +431,13 @@@ static bool set_mmio_spte(struct kvm_vc
  
  static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
  {
-       unsigned int kvm_gen, spte_gen;
+       u64 kvm_gen, spte_gen, gen;
  
-       kvm_gen = kvm_current_mmio_generation(vcpu);
+       gen = kvm_vcpu_memslots(vcpu)->generation;
+       if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
+               return false;
+       kvm_gen = gen & MMIO_SPTE_GEN_MASK;
        spte_gen = get_mmio_spte_generation(spte);
  
        trace_check_mmio_spte(spte, kvm_gen, spte_gen);
@@@ -959,7 -970,7 +970,7 @@@ static int mmu_topup_memory_cache(struc
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
+               obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
                if (!obj)
                        return cache->nobjs >= min ? 0 : -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
@@@ -2049,12 -2060,6 +2060,6 @@@ static struct kvm_mmu_page *kvm_mmu_all
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-       /*
-        * The active_mmu_pages list is the FIFO list, do not move the
-        * page until it is zapped. kvm_zap_obsolete_pages depends on
-        * this feature. See the comments in kvm_zap_obsolete_pages().
-        */
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
@@@ -2195,23 -2200,15 +2200,15 @@@ static void kvm_unlink_unsync_page(stru
        --kvm->stat.mmu_unsync;
  }
  
- static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-                                   struct list_head *invalid_list);
+ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                                    struct list_head *invalid_list);
  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
  
- /*
-  * NOTE: we should pay more attention on the zapped-obsolete page
-  * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk
-  * since it has been deleted from active_mmu_pages but still can be found
-  * at hast list.
-  *
-  * for_each_valid_sp() has skipped that kind of pages.
-  */
  #define for_each_valid_sp(_kvm, _sp, _gfn)                            \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-               if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
+               if ((_sp)->role.invalid) {    \
                } else
  
  #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                       \
@@@ -2231,18 -2228,28 +2228,28 @@@ static bool __kvm_sync_page(struct kvm_
        return true;
  }
  
+ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
+                                       struct list_head *invalid_list,
+                                       bool remote_flush)
+ {
+       if (!remote_flush && !list_empty(invalid_list))
+               return false;
+       if (!list_empty(invalid_list))
+               kvm_mmu_commit_zap_page(kvm, invalid_list);
+       else
+               kvm_flush_remote_tlbs(kvm);
+       return true;
+ }
  static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
                                 struct list_head *invalid_list,
                                 bool remote_flush, bool local_flush)
  {
-       if (!list_empty(invalid_list)) {
-               kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+       if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
                return;
-       }
  
-       if (remote_flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
-       else if (local_flush)
+       if (local_flush)
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
  }
  
@@@ -2253,11 -2260,6 +2260,6 @@@ static void kvm_mmu_audit(struct kvm_vc
  static void mmu_audit_disable(void) { }
  #endif
  
- static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
- {
-       return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
- }
  static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
  {
@@@ -2482,7 -2484,6 +2484,6 @@@ static struct kvm_mmu_page *kvm_mmu_get
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
-       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        clear_page(sp->spt);
        trace_kvm_mmu_get_page(sp, true);
  
@@@ -2668,17 -2669,22 +2669,22 @@@ static int mmu_zap_unsync_children(stru
        return zapped;
  }
  
- static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-                                   struct list_head *invalid_list)
+ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
+                                      struct kvm_mmu_page *sp,
+                                      struct list_head *invalid_list,
+                                      int *nr_zapped)
  {
-       int ret;
+       bool list_unstable;
  
        trace_kvm_mmu_prepare_zap_page(sp);
        ++kvm->stat.mmu_shadow_zapped;
-       ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
+       *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
  
+       /* Zapping children means active_mmu_pages has become unstable. */
+       list_unstable = *nr_zapped;
        if (!sp->role.invalid && !sp->role.direct)
                unaccount_shadowed(kvm, sp);
  
                kvm_unlink_unsync_page(kvm, sp);
        if (!sp->root_count) {
                /* Count self */
-               ret++;
+               (*nr_zapped)++;
                list_move(&sp->link, invalid_list);
                kvm_mod_used_mmu_pages(kvm, -1);
        } else {
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
  
-               /*
-                * The obsolete pages can not be used on any vcpus.
-                * See the comments in kvm_mmu_invalidate_zap_all_pages().
-                */
-               if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
+               if (!sp->role.invalid)
                        kvm_reload_remote_mmus(kvm);
        }
  
        sp->role.invalid = 1;
-       return ret;
+       return list_unstable;
+ }
+ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                                    struct list_head *invalid_list)
+ {
+       int nr_zapped;
+       __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
+       return nr_zapped;
  }
  
  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
@@@ -3555,7 -3566,6 +3566,7 @@@ void kvm_mmu_free_roots(struct kvm_vcp
                                                           &invalid_list);
                        mmu->root_hpa = INVALID_PAGE;
                }
 +              mmu->root_cr3 = 0;
        }
  
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@@ -3611,7 -3621,6 +3622,7 @@@ static int mmu_alloc_direct_roots(struc
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
        } else
                BUG();
 +      vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
  
        return 0;
  }
@@@ -3620,11 -3629,10 +3631,11 @@@ static int mmu_alloc_shadow_roots(struc
  {
        struct kvm_mmu_page *sp;
        u64 pdptr, pm_mask;
 -      gfn_t root_gfn;
 +      gfn_t root_gfn, root_cr3;
        int i;
  
 -      root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
 +      root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 +      root_gfn = root_cr3 >> PAGE_SHIFT;
  
        if (mmu_check_root(vcpu, root_gfn))
                return 1;
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu->root_hpa = root;
 -              return 0;
 +              goto set_root_cr3;
        }
  
        /*
  
                        u64 *lm_root;
  
-                       lm_root = (void*)get_zeroed_page(GFP_KERNEL);
+                       lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
                        if (lm_root == NULL)
                                return 1;
  
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
        }
  
 +set_root_cr3:
 +      vcpu->arch.mmu->root_cr3 = root_cr3;
 +
        return 0;
  }
  
@@@ -4169,7 -4174,7 +4180,7 @@@ static bool cached_root_available(struc
        struct kvm_mmu_root_info root;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
  
 -      root.cr3 = mmu->get_cr3(vcpu);
 +      root.cr3 = mmu->root_cr3;
        root.hpa = mmu->root_hpa;
  
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
        }
  
        mmu->root_hpa = root.hpa;
 +      mmu->root_cr3 = root.cr3;
  
        return i < KVM_MMU_NUM_PREV_ROOTS;
  }
@@@ -4204,14 -4208,6 +4215,6 @@@ static bool fast_cr3_switch(struct kvm_
                        return false;
  
                if (cached_root_available(vcpu, new_cr3, new_role)) {
-                       /*
-                        * It is possible that the cached previous root page is
-                        * obsolete because of a change in the MMU
-                        * generation number. However, that is accompanied by
-                        * KVM_REQ_MMU_RELOAD, which will free the root that we
-                        * have set here and allocate a new one.
-                        */
                        kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
                        if (!skip_tlb_flush) {
                                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@@ -4777,7 -4773,6 +4780,7 @@@ static union kvm_mmu_extended_role kvm_
        ext.cr4_pse = !!is_pse(vcpu);
        ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
        ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
 +      ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
  
        ext.valid = 1;
  
@@@ -5486,6 -5481,76 +5489,76 @@@ void kvm_disable_tdp(void
  }
  EXPORT_SYMBOL_GPL(kvm_disable_tdp);
  
+ /* The return value indicates if tlb flush on all vcpus is needed. */
+ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+ /* The caller should hold mmu-lock before calling this function. */
+ static __always_inline bool
+ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                       slot_level_handler fn, int start_level, int end_level,
+                       gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+ {
+       struct slot_rmap_walk_iterator iterator;
+       bool flush = false;
+       for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+                       end_gfn, &iterator) {
+               if (iterator.rmap)
+                       flush |= fn(kvm, iterator.rmap);
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                       if (flush && lock_flush_tlb) {
+                               kvm_flush_remote_tlbs(kvm);
+                               flush = false;
+                       }
+                       cond_resched_lock(&kvm->mmu_lock);
+               }
+       }
+       if (flush && lock_flush_tlb) {
+               kvm_flush_remote_tlbs(kvm);
+               flush = false;
+       }
+       return flush;
+ }
+ static __always_inline bool
+ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                 slot_level_handler fn, int start_level, int end_level,
+                 bool lock_flush_tlb)
+ {
+       return slot_handle_level_range(kvm, memslot, fn, start_level,
+                       end_level, memslot->base_gfn,
+                       memslot->base_gfn + memslot->npages - 1,
+                       lock_flush_tlb);
+ }
+ static __always_inline bool
+ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                     slot_level_handler fn, bool lock_flush_tlb)
+ {
+       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+ static __always_inline bool
+ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                       slot_level_handler fn, bool lock_flush_tlb)
+ {
+       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
+                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+ static __always_inline bool
+ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                slot_level_handler fn, bool lock_flush_tlb)
+ {
+       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+                                PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+ }
  static void free_mmu_pages(struct kvm_vcpu *vcpu)
  {
        free_page((unsigned long)vcpu->arch.mmu->pae_root);
@@@ -5505,7 -5570,7 +5578,7 @@@ static int alloc_mmu_pages(struct kvm_v
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
-       page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+       page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
        if (!page)
                return -ENOMEM;
  
@@@ -5524,13 -5589,11 +5597,13 @@@ int kvm_mmu_create(struct kvm_vcpu *vcp
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
  
        vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
 +      vcpu->arch.root_mmu.root_cr3 = 0;
        vcpu->arch.root_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
  
        vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
 +      vcpu->arch.guest_mmu.root_cr3 = 0;
        vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
@@@ -5543,105 -5606,62 +5616,62 @@@ static void kvm_mmu_invalidate_zap_page
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
  {
-       kvm_mmu_invalidate_zap_all_pages(kvm);
- }
- void kvm_mmu_init_vm(struct kvm *kvm)
- {
-       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-       node->track_write = kvm_mmu_pte_write;
-       node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
-       kvm_page_track_register_notifier(kvm, node);
- }
+       struct kvm_mmu_page *sp;
+       LIST_HEAD(invalid_list);
+       unsigned long i;
+       bool flush;
+       gfn_t gfn;
  
- void kvm_mmu_uninit_vm(struct kvm *kvm)
- {
-       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+       spin_lock(&kvm->mmu_lock);
  
-       kvm_page_track_unregister_notifier(kvm, node);
- }
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               goto out_unlock;
  
- /* The return value indicates if tlb flush on all vcpus is needed. */
- typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+       flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
  
- /* The caller should hold mmu-lock before calling this function. */
- static __always_inline bool
- slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                       slot_level_handler fn, int start_level, int end_level,
-                       gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
- {
-       struct slot_rmap_walk_iterator iterator;
-       bool flush = false;
+       for (i = 0; i < slot->npages; i++) {
+               gfn = slot->base_gfn + i;
  
-       for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
-                       end_gfn, &iterator) {
-               if (iterator.rmap)
-                       flush |= fn(kvm, iterator.rmap);
+               for_each_valid_sp(kvm, sp, gfn) {
+                       if (sp->gfn != gfn)
+                               continue;
  
+                       kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+               }
                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                       if (flush && lock_flush_tlb) {
-                               kvm_flush_remote_tlbs(kvm);
-                               flush = false;
-                       }
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+                       flush = false;
                        cond_resched_lock(&kvm->mmu_lock);
                }
        }
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
  
-       if (flush && lock_flush_tlb) {
-               kvm_flush_remote_tlbs(kvm);
-               flush = false;
-       }
-       return flush;
+ out_unlock:
+       spin_unlock(&kvm->mmu_lock);
  }
  
- static __always_inline bool
- slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                 slot_level_handler fn, int start_level, int end_level,
-                 bool lock_flush_tlb)
+ void kvm_mmu_init_vm(struct kvm *kvm)
  {
-       return slot_handle_level_range(kvm, memslot, fn, start_level,
-                       end_level, memslot->base_gfn,
-                       memslot->base_gfn + memslot->npages - 1,
-                       lock_flush_tlb);
- }
+       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
  
- static __always_inline bool
- slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                     slot_level_handler fn, bool lock_flush_tlb)
- {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+       node->track_write = kvm_mmu_pte_write;
+       node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+       kvm_page_track_register_notifier(kvm, node);
  }
  
- static __always_inline bool
- slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                       slot_level_handler fn, bool lock_flush_tlb)
+ void kvm_mmu_uninit_vm(struct kvm *kvm)
  {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
-                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
- }
+       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
  
- static __always_inline bool
- slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                slot_level_handler fn, bool lock_flush_tlb)
- {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+       kvm_page_track_unregister_notifier(kvm, node);
  }
  
  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
  {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
-       bool flush_tlb = true;
-       bool flush = false;
        int i;
  
-       if (kvm_available_flush_tlb_with_range())
-               flush_tlb = false;
        spin_lock(&kvm->mmu_lock);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                slots = __kvm_memslots(kvm, i);
                        if (start >= end)
                                continue;
  
-                       flush |= slot_handle_level_range(kvm, memslot,
-                                       kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
-                                       PT_MAX_HUGEPAGE_LEVEL, start,
-                                       end - 1, flush_tlb);
+                       slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+                                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+                                               start, end - 1, true);
                }
        }
  
-       if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-                               gfn_end - gfn_start + 1);
        spin_unlock(&kvm->mmu_lock);
  }
  
@@@ -5815,101 -5830,58 +5840,58 @@@ void kvm_mmu_slot_set_dirty(struct kvm 
  }
  EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
  
- #define BATCH_ZAP_PAGES       10
- static void kvm_zap_obsolete_pages(struct kvm *kvm)
+ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
  {
        struct kvm_mmu_page *sp, *node;
-       int batch = 0;
+       LIST_HEAD(invalid_list);
+       int ign;
  
+       spin_lock(&kvm->mmu_lock);
  restart:
-       list_for_each_entry_safe_reverse(sp, node,
-             &kvm->arch.active_mmu_pages, link) {
-               int ret;
-               /*
-                * No obsolete page exists before new created page since
-                * active_mmu_pages is the FIFO list.
-                */
-               if (!is_obsolete_sp(kvm, sp))
-                       break;
-               /*
-                * Since we are reversely walking the list and the invalid
-                * list will be moved to the head, skip the invalid page
-                * can help us to avoid the infinity list walking.
-                */
-               if (sp->role.invalid)
+       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+               if (mmio_only && !sp->mmio_cached)
                        continue;
-               /*
-                * Need not flush tlb since we only zap the sp with invalid
-                * generation number.
-                */
-               if (batch >= BATCH_ZAP_PAGES &&
-                     cond_resched_lock(&kvm->mmu_lock)) {
-                       batch = 0;
+               if (sp->role.invalid && sp->root_count)
+                       continue;
+               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
+                       WARN_ON_ONCE(mmio_only);
                        goto restart;
                }
-               ret = kvm_mmu_prepare_zap_page(kvm, sp,
-                               &kvm->arch.zapped_obsolete_pages);
-               batch += ret;
-               if (ret)
+               if (cond_resched_lock(&kvm->mmu_lock))
                        goto restart;
        }
  
-       /*
-        * Should flush tlb before free page tables since lockless-walking
-        * may use the pages.
-        */
-       kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
- }
- /*
-  * Fast invalidate all shadow pages and use lock-break technique
-  * to zap obsolete pages.
-  *
-  * It's required when memslot is being deleted or VM is being
-  * destroyed, in these cases, we should ensure that KVM MMU does
-  * not use any resource of the being-deleted slot or all slots
-  * after calling the function.
-  */
- void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
- {
-       spin_lock(&kvm->mmu_lock);
-       trace_kvm_mmu_invalidate_zap_all_pages(kvm);
-       kvm->arch.mmu_valid_gen++;
-       /*
-        * Notify all vcpus to reload its shadow page table
-        * and flush TLB. Then all vcpus will switch to new
-        * shadow page table with the new mmu_valid_gen.
-        *
-        * Note: we should do this under the protection of
-        * mmu-lock, otherwise, vcpu would purge shadow page
-        * but miss tlb flush.
-        */
-       kvm_reload_remote_mmus(kvm);
-       kvm_zap_obsolete_pages(kvm);
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);
  }
  
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
void kvm_mmu_zap_all(struct kvm *kvm)
  {
-       return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+       return __kvm_mmu_zap_all(kvm, false);
  }
  
- void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
+ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
  {
+       WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+       gen &= MMIO_SPTE_GEN_MASK;
        /*
-        * The very rare case: if the generation-number is round,
+        * Generation numbers are incremented in multiples of the number of
+        * address spaces in order to provide unique generations across all
+        * address spaces.  Strip what is effectively the address space
+        * modifier prior to checking for a wrap of the MMIO generation so
+        * that a wrap in any address space is detected.
+        */
+       gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
+       /*
+        * The very rare case: if the MMIO generation number has wrapped,
         * zap all shadow pages.
         */
-       if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
+       if (unlikely(gen == 0)) {
                kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-               kvm_mmu_invalidate_zap_all_pages(kvm);
+               __kvm_mmu_zap_all(kvm, true);
        }
  }
  
@@@ -5940,24 -5912,16 +5922,16 @@@ mmu_shrink_scan(struct shrinker *shrink
                 * want to shrink a VM that only started to populate its MMU
                 * anyway.
                 */
-               if (!kvm->arch.n_used_mmu_pages &&
-                     !kvm_has_zapped_obsolete_pages(kvm))
+               if (!kvm->arch.n_used_mmu_pages)
                        continue;
  
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
  
-               if (kvm_has_zapped_obsolete_pages(kvm)) {
-                       kvm_mmu_commit_zap_page(kvm,
-                             &kvm->arch.zapped_obsolete_pages);
-                       goto unlock;
-               }
                if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
                        freed++;
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
  
- unlock:
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
  
@@@ -211,7 -211,6 +211,6 @@@ static void free_nested(struct kvm_vcp
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
  
-       hrtimer_cancel(&vmx->nested.preemption_timer);
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
@@@ -274,6 -273,7 +273,7 @@@ static void vmx_switch_vmcs(struct kvm_
  void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
  {
        vcpu_load(vcpu);
+       vmx_leave_nested(vcpu);
        vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
        free_nested(vcpu);
        vcpu_put(vcpu);
@@@ -1980,17 -1980,6 +1980,6 @@@ static void prepare_vmcs02_early(struc
                prepare_vmcs02_early_full(vmx, vmcs12);
  
        /*
-        * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
-        * entry, but only if the current (host) sp changed from the value
-        * we wrote last (vmx->host_rsp).  This cache is no longer relevant
-        * if we switch vmcs, and rather than hold a separate cache per vmcs,
-        * here we just force the write to happen on entry.  host_rsp will
-        * also be written unconditionally by nested_vmx_check_vmentry_hw()
-        * if we are doing early consistency checks via hardware.
-        */
-       vmx->host_rsp = 0;
-       /*
         * PIN CONTROLS
         */
        exec_control = vmcs12->pin_based_vm_exec_control;
@@@ -2289,10 -2278,6 +2278,6 @@@ static int prepare_vmcs02(struct kvm_vc
        }
        vmx_set_rflags(vcpu, vmcs12->guest_rflags);
  
-       vmx->nested.preemption_timer_expired = false;
-       if (nested_cpu_has_preemption_timer(vmcs12))
-               vmx_start_preemption_timer(vcpu);
        /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
         * bitwise-or of what L1 wants to trap for L2, and what we want to
         * trap. Note that CR0.TS also needs updating - we do this later.
@@@ -2473,10 -2458,6 +2458,10 @@@ static int nested_check_vm_execution_co
            (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
                return -EINVAL;
  
 +      if (!nested_cpu_has_preemption_timer(vmcs12) &&
 +          nested_cpu_has_save_preemption_timer(vmcs12))
 +              return -EINVAL;
 +
        if (nested_cpu_has_ept(vmcs12) &&
            !valid_ept_address(vcpu, vmcs12->ept_pointer))
                return -EINVAL;
@@@ -2722,6 -2703,7 +2707,7 @@@ static int nested_vmx_check_vmentry_hw(
  {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
+       bool vm_fail;
  
        if (!nested_early_check)
                return 0;
                vmx->loaded_vmcs->host_state.cr4 = cr4;
        }
  
-       vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
-               /* Set HOST_RSP */
                "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-               __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
-               "mov %%" _ASM_SP ", %c[host_rsp](%1)\n\t"
+               "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
+               "je 1f \n\t"
+               __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
+               "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
+               "1: \n\t"
                "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
  
                /* Check if vmlaunch or vmresume is needed */
-               "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
+               "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
  
+               /*
+                * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
+                * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
+                * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
+                * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
+                */
                "call vmx_vmenter\n\t"
  
-               /* Set vmx->fail accordingly */
-               "setbe %c[fail](%% " _ASM_CX")\n\t"
-             : ASM_CALL_CONSTRAINT
-             : "c"(vmx), "d"((unsigned long)HOST_RSP),
-               [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
-               [fail]"i"(offsetof(struct vcpu_vmx, fail)),
-               [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
+               CC_SET(be)
+             : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
+             : [HOST_RSP]"r"((unsigned long)HOST_RSP),
+               [loaded_vmcs]"r"(vmx->loaded_vmcs),
+               [launched]"i"(offsetof(struct loaded_vmcs, launched)),
+               [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
                [wordsize]"i"(sizeof(ulong))
-             : "rax", "cc", "memory"
+             : "cc", "memory"
        );
  
        preempt_enable();
        if (vmx->msr_autoload.guest.nr)
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
  
-       if (vmx->fail) {
+       if (vm_fail) {
                WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
                             VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-               vmx->fail = 0;
                return 1;
        }
  
  
        return 0;
  }
- STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
  
  static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12);
@@@ -3031,6 -3015,15 +3019,15 @@@ int nested_vmx_enter_non_root_mode(stru
                kvm_make_request(KVM_REQ_EVENT, vcpu);
  
        /*
+        * Do not start the preemption timer hrtimer until after we know
+        * we are successful, so that only nested_vmx_vmexit needs to cancel
+        * the timer.
+        */
+       vmx->nested.preemption_timer_expired = false;
+       if (nested_cpu_has_preemption_timer(vmcs12))
+               vmx_start_preemption_timer(vcpu);
+       /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
         * returned as far as L1 is concerned. It will only return (and set
@@@ -3450,13 -3443,10 +3447,10 @@@ static void sync_vmcs12(struct kvm_vcp
        else
                vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
  
-       if (nested_cpu_has_preemption_timer(vmcs12)) {
-               if (vmcs12->vm_exit_controls &
-                   VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+       if (nested_cpu_has_preemption_timer(vmcs12) &&
+           vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
                        vmcs12->vmx_preemption_timer_value =
                                vmx_get_preemption_timer_value(vcpu);
-               hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
-       }
  
        /*
         * In some cases (usually, nested EPT), L2 is allowed to change its
@@@ -3864,6 -3854,9 +3858,9 @@@ void nested_vmx_vmexit(struct kvm_vcpu 
  
        leave_guest_mode(vcpu);
  
+       if (nested_cpu_has_preemption_timer(vmcs12))
+               hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
  
                vmx_flush_tlb(vcpu, true);
        }
  
-       /* This is needed for same reason as it was needed in prepare_vmcs02 */
-       vmx->host_rsp = 0;
        /* Unpin physical memory we referred to in vmcs02 */
        if (vmx->nested.apic_access_page) {
                kvm_release_page_dirty(vmx->nested.apic_access_page);
@@@ -4035,25 -4025,50 +4029,50 @@@ int get_vmx_mem_address(struct kvm_vcp
        /* Addr = segment_base + offset */
        /* offset = base + [index * scale] + displacement */
        off = exit_qualification; /* holds the displacement */
+       if (addr_size == 1)
+               off = (gva_t)sign_extend64(off, 31);
+       else if (addr_size == 0)
+               off = (gva_t)sign_extend64(off, 15);
        if (base_is_valid)
                off += kvm_register_read(vcpu, base_reg);
        if (index_is_valid)
                off += kvm_register_read(vcpu, index_reg)<<scaling;
        vmx_get_segment(vcpu, &s, seg_reg);
-       *ret = s.base + off;
  
+       /*
+        * The effective address, i.e. @off, of a memory operand is truncated
+        * based on the address size of the instruction.  Note that this is
+        * the *effective address*, i.e. the address prior to accounting for
+        * the segment's base.
+        */
        if (addr_size == 1) /* 32 bit */
-               *ret &= 0xffffffff;
+               off &= 0xffffffff;
+       else if (addr_size == 0) /* 16 bit */
+               off &= 0xffff;
  
        /* Checks for #GP/#SS exceptions. */
        exn = false;
        if (is_long_mode(vcpu)) {
+               /*
+                * The virtual/linear address is never truncated in 64-bit
+                * mode, e.g. a 32-bit address size can yield a 64-bit virtual
+                * address when using FS/GS with a non-zero base.
+                */
+               *ret = s.base + off;
                /* Long mode: #GP(0)/#SS(0) if the memory address is in a
                 * non-canonical form. This is the only check on the memory
                 * destination for long mode!
                 */
                exn = is_noncanonical_address(*ret, vcpu);
-       } else if (is_protmode(vcpu)) {
+       } else {
+               /*
+                * When not in long mode, the virtual/linear address is
+                * unconditionally truncated to 32 bits regardless of the
+                * address size.
+                */
+               *ret = (s.base + off) & 0xffffffff;
                /* Protected mode: apply checks for segment validity in the
                 * following order:
                 * - segment type check (#GP(0) may be thrown)
                /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
                 */
                exn = (s.unusable != 0);
-               /* Protected mode: #GP(0)/#SS(0) if the memory
-                * operand is outside the segment limit.
+               /*
+                * Protected mode: #GP(0)/#SS(0) if the memory operand is
+                * outside the segment limit.  All CPUs that support VMX ignore
+                * limit checks for flat segments, i.e. segments with base==0,
+                * limit==0xffffffff and of type expand-up data or code.
                 */
-               exn = exn || (off + sizeof(u64) > s.limit);
+               if (!(s.base == 0 && s.limit == 0xffffffff &&
+                    ((s.type & 8) || !(s.type & 4))))
+                       exn = exn || (off + sizeof(u64) > s.limit);
        }
        if (exn) {
                kvm_queue_exception_e(vcpu,
@@@ -4145,11 -4166,11 +4170,11 @@@ static int enter_vmx_operation(struct k
        if (r < 0)
                goto out_vmcs02;
  
-       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
  
-       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
  
@@@ -5561,11 -5582,9 +5586,11 @@@ void nested_vmx_setup_ctls_msrs(struct 
         * secondary cpu-based controls.  Do not include those that
         * depend on CPUID bits, they are added later by vmx_cpuid_update.
         */
 -      rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
 -              msrs->secondary_ctls_low,
 -              msrs->secondary_ctls_high);
 +      if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
 +              rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
 +                    msrs->secondary_ctls_low,
 +                    msrs->secondary_ctls_high);
 +
        msrs->secondary_ctls_low = 0;
        msrs->secondary_ctls_high &=
                SECONDARY_EXEC_DESC |
@@@ -5696,6 -5715,10 +5721,10 @@@ __init int nested_vmx_hardware_setup(in
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
                for (i = 0; i < VMX_BITMAP_NR; i++) {
+                       /*
+                        * The vmx_bitmap is not tied to a VM and so should
+                        * not be charged to a memcg.
+                        */
                        vmx_bitmap[i] = (unsigned long *)
                                __get_free_page(GFP_KERNEL);
                        if (!vmx_bitmap[i]) {
diff --combined arch/x86/kvm/vmx/vmx.c
@@@ -246,6 -246,10 +246,10 @@@ static int vmx_setup_l1d_flush(enum vmx
  
        if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
            !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+               /*
+                * This allocation for vmx_l1d_flush_pages is not tied to a VM
+                * lifetime and so should not be charged to a memcg.
+                */
                page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
                if (!page)
                        return -ENOMEM;
@@@ -863,8 -867,7 +867,8 @@@ static void add_atomic_switch_msr(struc
        if (!entry_only)
                j = find_msr(&m->host, msr);
  
 -      if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
 +      if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
 +              (j < 0 &&  m->host.nr == NR_AUTOLOAD_MSRS)) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
                return;
@@@ -2387,13 -2390,13 +2391,13 @@@ static __init int setup_vmcs_config(str
        return 0;
  }
  
- struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
+ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
  {
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;
  
-       pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+       pages = __alloc_pages_node(node, flags, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
@@@ -2440,7 -2443,8 +2444,8 @@@ int alloc_loaded_vmcs(struct loaded_vmc
        loaded_vmcs_init(loaded_vmcs);
  
        if (cpu_has_vmx_msr_bitmap()) {
-               loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+               loaded_vmcs->msr_bitmap = (unsigned long *)
+                               __get_free_page(GFP_KERNEL_ACCOUNT);
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
@@@ -2481,7 -2485,7 +2486,7 @@@ static __init int alloc_kvm_area(void
        for_each_possible_cpu(cpu) {
                struct vmcs *vmcs;
  
-               vmcs = alloc_vmcs_cpu(false, cpu);
+               vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
@@@ -6360,150 -6364,15 +6365,15 @@@ static void vmx_update_hv_timer(struct 
        vmx->loaded_vmcs->hv_timer_armed = false;
  }
  
static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
  {
-       unsigned long evmcs_rsp;
-       vmx->__launched = vmx->loaded_vmcs->launched;
-       evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
-               (unsigned long)&current_evmcs->host_rsp : 0;
-       if (static_branch_unlikely(&vmx_l1d_should_flush))
-               vmx_l1d_flush(vcpu);
-       asm(
-               /* Store host registers */
-               "push %%" _ASM_DX "; push %%" _ASM_BP ";"
-               "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
-               "push %%" _ASM_CX " \n\t"
-               "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-               "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
-               "je 1f \n\t"
-               "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
-               /* Avoid VMWRITE when Enlightened VMCS is in use */
-               "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
-               "jz 2f \n\t"
-               "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
-               "jmp 1f \n\t"
-               "2: \n\t"
-               __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
-               "1: \n\t"
-               "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-               /* Reload cr2 if changed */
-               "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
-               "mov %%cr2, %%" _ASM_DX " \n\t"
-               "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
-               "je 3f \n\t"
-               "mov %%" _ASM_AX", %%cr2 \n\t"
-               "3: \n\t"
-               /* Check if vmlaunch or vmresume is needed */
-               "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
-               /* Load guest registers.  Don't clobber flags. */
-               "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
-               "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
-               "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
-               "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
-               "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
-               "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
- #ifdef CONFIG_X86_64
-               "mov %c[r8](%%" _ASM_CX "),  %%r8  \n\t"
-               "mov %c[r9](%%" _ASM_CX "),  %%r9  \n\t"
-               "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
-               "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
-               "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
-               "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
-               "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
-               "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
- #endif
-               /* Load guest RCX.  This kills the vmx_vcpu pointer! */
-               "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
-               /* Enter guest mode */
-               "call vmx_vmenter\n\t"
-               /* Save guest's RCX to the stack placeholder (see above) */
-               "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
-               /* Load host's RCX, i.e. the vmx_vcpu pointer */
-               "pop %%" _ASM_CX " \n\t"
-               /* Set vmx->fail based on EFLAGS.{CF,ZF} */
-               "setbe %c[fail](%%" _ASM_CX ")\n\t"
-               /* Save all guest registers, including RCX from the stack */
-               "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
-               "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
-               __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
-               "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
-               "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
-               "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
-               "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
- #ifdef CONFIG_X86_64
-               "mov %%r8,  %c[r8](%%" _ASM_CX ") \n\t"
-               "mov %%r9,  %c[r9](%%" _ASM_CX ") \n\t"
-               "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
-               "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
-               "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
-               "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
-               "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
-               "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
-               /*
-               * Clear host registers marked as clobbered to prevent
-               * speculative use.
-               */
-               "xor %%r8d,  %%r8d \n\t"
-               "xor %%r9d,  %%r9d \n\t"
-               "xor %%r10d, %%r10d \n\t"
-               "xor %%r11d, %%r11d \n\t"
-               "xor %%r12d, %%r12d \n\t"
-               "xor %%r13d, %%r13d \n\t"
-               "xor %%r14d, %%r14d \n\t"
-               "xor %%r15d, %%r15d \n\t"
- #endif
-               "mov %%cr2, %%" _ASM_AX "   \n\t"
-               "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
-               "xor %%eax, %%eax \n\t"
-               "xor %%ebx, %%ebx \n\t"
-               "xor %%esi, %%esi \n\t"
-               "xor %%edi, %%edi \n\t"
-               "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
-             : ASM_CALL_CONSTRAINT
-             : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
-               [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
-               [fail]"i"(offsetof(struct vcpu_vmx, fail)),
-               [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
-               [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
-               [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
-               [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
-               [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
-               [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
-               [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
-               [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
- #ifdef CONFIG_X86_64
-               [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
-               [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
-               [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
-               [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
-               [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
-               [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
-               [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
-               [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
- #endif
-               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
-               [wordsize]"i"(sizeof(ulong))
-             : "cc", "memory"
- #ifdef CONFIG_X86_64
-               , "rax", "rbx", "rdi"
-               , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
- #else
-               , "eax", "ebx", "edi"
- #endif
-             );
+       if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
+               vmx->loaded_vmcs->host_state.rsp = host_rsp;
+               vmcs_writel(HOST_RSP, host_rsp);
+       }
  }
- STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
  
  static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
  {
         */
        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
  
-       __vmx_vcpu_run(vcpu, vmx);
+       if (static_branch_unlikely(&vmx_l1d_should_flush))
+               vmx_l1d_flush(vcpu);
+       if (vcpu->arch.cr2 != read_cr2())
+               write_cr2(vcpu->arch.cr2);
+       vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+                                  vmx->loaded_vmcs->launched);
+       vcpu->arch.cr2 = read_cr2();
  
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
  
  static struct kvm *vmx_vm_alloc(void)
  {
-       struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+       struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
+                                           GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+                                           PAGE_KERNEL);
        return &kvm_vmx->kvm;
  }
  
@@@ -6673,7 -6553,6 +6554,6 @@@ static void vmx_free_vcpu(struct kvm_vc
        if (enable_pml)
                vmx_destroy_pml_buffer(vmx);
        free_vpid(vmx->vpid);
-       leave_guest_mode(vcpu);
        nested_vmx_free_vcpu(vcpu);
        free_loaded_vmcs(vmx->loaded_vmcs);
        kfree(vmx->guest_msrs);
  static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
  {
        int err;
-       struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       struct vcpu_vmx *vmx;
        unsigned long *msr_bitmap;
        int cpu;
  
+       vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
        if (!vmx)
                return ERR_PTR(-ENOMEM);
  
-       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+                       GFP_KERNEL_ACCOUNT);
        if (!vmx->vcpu.arch.guest_fpu) {
                printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
                err = -ENOMEM;
         * for the guest, etc.
         */
        if (enable_pml) {
-               vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
                if (!vmx->pml_pg)
                        goto uninit_vcpu;
        }
  
-       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
        BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
                     > PAGE_SIZE);
  
@@@ -326,48 -326,6 +326,48 @@@ static u64 notrace arm64_1188873_read_c
  }
  #endif
  
 +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
 +/*
 + * The low bits of the counter registers are indeterminate while bit 10 or
 + * greater is rolling over. Since the counter value can jump both backward
 + * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 + * with all ones or all zeros in the low bits. Bound the loop by the maximum
 + * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 + */
 +#define __sun50i_a64_read_reg(reg) ({                                 \
 +      u64 _val;                                                       \
 +      int _retries = 150;                                             \
 +                                                                      \
 +      do {                                                            \
 +              _val = read_sysreg(reg);                                \
 +              _retries--;                                             \
 +      } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);        \
 +                                                                      \
 +      WARN_ON_ONCE(!_retries);                                        \
 +      _val;                                                           \
 +})
 +
 +static u64 notrace sun50i_a64_read_cntpct_el0(void)
 +{
 +      return __sun50i_a64_read_reg(cntpct_el0);
 +}
 +
 +static u64 notrace sun50i_a64_read_cntvct_el0(void)
 +{
 +      return __sun50i_a64_read_reg(cntvct_el0);
 +}
 +
 +static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
 +{
 +      return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
 +}
 +
 +static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
 +{
 +      return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
 +}
 +#endif
 +
  #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
  DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
  EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
@@@ -465,19 -423,6 +465,19 @@@ static const struct arch_timer_erratum_
                .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
        },
  #endif
 +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
 +      {
 +              .match_type = ate_match_dt,
 +              .id = "allwinner,erratum-unknown1",
 +              .desc = "Allwinner erratum UNKNOWN1",
 +              .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
 +              .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
 +              .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
 +              .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
 +              .set_next_event_phys = erratum_set_next_event_tval_phys,
 +              .set_next_event_virt = erratum_set_next_event_tval_virt,
 +      },
 +#endif
  };
  
  typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@@ -1261,6 -1206,13 +1261,13 @@@ static enum arch_timer_ppi_nr __init ar
        return ARCH_TIMER_PHYS_SECURE_PPI;
  }
  
+ static void __init arch_timer_populate_kvm_info(void)
+ {
+       arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+       if (is_kernel_in_hyp_mode())
+               arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
+ }
  static int __init arch_timer_of_init(struct device_node *np)
  {
        int i, ret;
        for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
  
-       arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+       arch_timer_populate_kvm_info();
  
        rate = arch_timer_get_cntfrq();
        arch_timer_of_configure_rate(rate, np);
@@@ -1605,7 -1557,7 +1612,7 @@@ static int __init arch_timer_acpi_init(
        arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
                acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
  
-       arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+       arch_timer_populate_kvm_info();
  
        /*
         * When probing via ACPI, we have no mechanism to override the sysreg
diff --combined virt/kvm/arm/arm.c
@@@ -65,7 -65,6 +65,6 @@@ static DEFINE_PER_CPU(struct kvm_vcpu *
  /* The VMID used in the VTTBR */
  static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
  static u32 kvm_next_vmid;
- static unsigned int kvm_vmid_bits __read_mostly;
  static DEFINE_SPINLOCK(kvm_vmid_lock);
  
  static bool vgic_present;
@@@ -142,7 -141,7 +141,7 @@@ int kvm_arch_init_vm(struct kvm *kvm, u
        kvm_vgic_early_init(kvm);
  
        /* Mark the initial VMID generation invalid */
-       kvm->arch.vmid_gen = 0;
+       kvm->arch.vmid.vmid_gen = 0;
  
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->arch.max_vcpus = vgic_present ?
@@@ -336,13 -335,11 +335,11 @@@ int kvm_cpu_has_pending_timer(struct kv
  
  void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
  {
-       kvm_timer_schedule(vcpu);
        kvm_vgic_v4_enable_doorbell(vcpu);
  }
  
  void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
  {
-       kvm_timer_unschedule(vcpu);
        kvm_vgic_v4_disable_doorbell(vcpu);
  }
  
@@@ -472,37 -469,31 +469,31 @@@ void force_vm_exit(const cpumask_t *mas
  
  /**
   * need_new_vmid_gen - check that the VMID is still valid
-  * @kvm: The VM's VMID to check
+  * @vmid: The VMID to check
   *
   * return true if there is a new generation of VMIDs being used
   *
-  * The hardware supports only 256 values with the value zero reserved for the
-  * host, so we check if an assigned value belongs to a previous generation,
-  * which which requires us to assign a new value. If we're the first to use a
-  * VMID for the new generation, we must flush necessary caches and TLBs on all
-  * CPUs.
+  * The hardware supports a limited set of values with the value zero reserved
+  * for the host, so we check if an assigned value belongs to a previous
+  * generation, which requires us to assign a new value. If we're the
+  * first to use a VMID for the new generation, we must flush necessary caches
+  * and TLBs on all CPUs.
   */
- static bool need_new_vmid_gen(struct kvm *kvm)
+ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
  {
        u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
        smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
-       return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
+       return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
  }
  
  /**
-  * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
-  * @kvm       The guest that we are about to run
-  *
-  * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
-  * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
-  * caches and TLBs.
+  * update_vmid - Update the vmid with a valid VMID for the current generation
+  * @kvm: The guest that struct vmid belongs to
+  * @vmid: The stage-2 VMID information struct
   */
- static void update_vttbr(struct kvm *kvm)
+ static void update_vmid(struct kvm_vmid *vmid)
  {
-       phys_addr_t pgd_phys;
-       u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
-       if (!need_new_vmid_gen(kvm))
+       if (!need_new_vmid_gen(vmid))
                return;
  
        spin_lock(&kvm_vmid_lock);
         * already allocated a valid vmid for this vm, then this vcpu should
         * use the same vmid.
         */
-       if (!need_new_vmid_gen(kvm)) {
+       if (!need_new_vmid_gen(vmid)) {
                spin_unlock(&kvm_vmid_lock);
                return;
        }
                kvm_call_hyp(__kvm_flush_vm_context);
        }
  
-       kvm->arch.vmid = kvm_next_vmid;
+       vmid->vmid = kvm_next_vmid;
        kvm_next_vmid++;
-       kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-       /* update vttbr to be used with the new vmid */
-       pgd_phys = virt_to_phys(kvm->arch.pgd);
-       BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
-       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-       kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
+       kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
  
        smp_wmb();
-       WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+       WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
  
        spin_unlock(&kvm_vmid_lock);
  }
@@@ -626,13 -611,6 +611,13 @@@ static void vcpu_req_sleep(struct kvm_v
                /* Awaken to handle a signal, request we sleep again later. */
                kvm_make_request(KVM_REQ_SLEEP, vcpu);
        }
 +
 +      /*
 +       * Make sure we will observe a potential reset request if we've
 +       * observed a change to the power state. Pairs with the smp_wmb() in
 +       * kvm_psci_vcpu_on().
 +       */
 +      smp_rmb();
  }
  
  static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@@ -646,9 -624,6 +631,9 @@@ static void check_vcpu_requests(struct 
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
                        vcpu_req_sleep(vcpu);
  
 +              if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 +                      kvm_reset_vcpu(vcpu);
 +
                /*
                 * Clear IRQ_PENDING requests that were made to guarantee
                 * that a VCPU sees new virtual interrupts.
@@@ -700,7 -675,7 +685,7 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
                 */
                cond_resched();
  
-               update_vttbr(vcpu->kvm);
+               update_vmid(&vcpu->kvm->arch.vmid);
  
                check_vcpu_requests(vcpu);
  
                 */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
  
-               if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+               if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        isb(); /* Ensure work in x_flush_hwstate is committed */
                        ret = kvm_vcpu_run_vhe(vcpu);
                        kvm_arm_vhe_guest_exit();
                } else {
-                       ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);
+                       ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
                }
  
                vcpu->mode = OUTSIDE_GUEST_MODE;
@@@ -1427,10 -1402,6 +1412,6 @@@ static inline void hyp_cpu_pm_exit(void
  
  static int init_common_resources(void)
  {
-       /* set size of VMID supported by CPU */
-       kvm_vmid_bits = kvm_get_vmid_bits();
-       kvm_info("%d-bit VMID\n", kvm_vmid_bits);
        kvm_set_ipa_limit();
  
        return 0;
@@@ -1571,6 -1542,7 +1552,7 @@@ static int init_hyp_mode(void
                kvm_cpu_context_t *cpu_ctxt;
  
                cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
+               kvm_init_host_cpu_context(cpu_ctxt, cpu);
                err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
  
                if (err) {
  
        err = hyp_map_aux_data();
        if (err)
-               kvm_err("Cannot map host auxilary data: %d\n", err);
+               kvm_err("Cannot map host auxiliary data: %d\n", err);
  
        return 0;
  
diff --combined virt/kvm/arm/mmu.c
  #include <asm/kvm_arm.h>
  #include <asm/kvm_mmu.h>
  #include <asm/kvm_mmio.h>
 +#include <asm/kvm_ras.h>
  #include <asm/kvm_asm.h>
  #include <asm/kvm_emulate.h>
  #include <asm/virt.h>
 -#include <asm/system_misc.h>
  
  #include "trace.h"
  
@@@ -908,6 -908,7 +908,7 @@@ int create_hyp_exec_mappings(phys_addr_
   */
  int kvm_alloc_stage2_pgd(struct kvm *kvm)
  {
+       phys_addr_t pgd_phys;
        pgd_t *pgd;
  
        if (kvm->arch.pgd != NULL) {
        if (!pgd)
                return -ENOMEM;
  
+       pgd_phys = virt_to_phys(pgd);
+       if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
+               return -EINVAL;
        kvm->arch.pgd = pgd;
+       kvm->arch.pgd_phys = pgd_phys;
        return 0;
  }
  
@@@ -1008,6 -1014,7 +1014,7 @@@ void kvm_free_stage2_pgd(struct kvm *kv
                unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
                pgd = READ_ONCE(kvm->arch.pgd);
                kvm->arch.pgd = NULL;
+               kvm->arch.pgd_phys = 0;
        }
        spin_unlock(&kvm->mmu_lock);
  
@@@ -1396,14 -1403,6 +1403,6 @@@ static bool transparent_hugepage_adjust
        return false;
  }
  
- static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
- {
-       if (kvm_vcpu_trap_is_iabt(vcpu))
-               return false;
-       return kvm_vcpu_dabt_iswrite(vcpu);
- }
  /**
   * stage2_wp_ptes - write protect PMD range
   * @pmd:      pointer to pmd entry
@@@ -1598,14 -1597,13 +1597,13 @@@ static void kvm_send_hwpoison_signal(un
  static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
                                               unsigned long hva)
  {
-       gpa_t gpa_start, gpa_end;
+       gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
        size_t size;
  
        size = memslot->npages * PAGE_SIZE;
  
        gpa_start = memslot->base_gfn << PAGE_SHIFT;
-       gpa_end = gpa_start + size;
  
        uaddr_start = memslot->userspace_addr;
        uaddr_end = uaddr_start + size;
@@@ -1695,14 -1693,11 +1693,14 @@@ static int user_mem_abort(struct kvm_vc
  
        vma_pagesize = vma_kernel_pagesize(vma);
        /*
 -       * PUD level may not exist for a VM but PMD is guaranteed to
 -       * exist.
 +       * The stage2 has a minimum of a 2-level page table (for arm64 see
 +       * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
 +       * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
 +       * As for PUD huge maps, we must make sure that we have at least
 +       * 3 levels, i.e., the PMD is not folded.
         */
        if ((vma_pagesize == PMD_SIZE ||
 -           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
 +           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
            !force_pte) {
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
        }
@@@ -1906,7 -1901,7 +1904,7 @@@ int kvm_handle_guest_abort(struct kvm_v
                 * For RAS the host kernel may handle this abort.
                 * There is no need to pass the error into the guest.
                 */
 -              if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
 +              if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
                        return 1;
  
                if (unlikely(!is_iabt)) {
@@@ -2353,7 -2348,7 +2351,7 @@@ int kvm_arch_create_memslot(struct kvm 
        return 0;
  }
  
- void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
  {
  }
  
@@@ -76,7 -76,7 +76,7 @@@ void vgic_v3_fold_lr_state(struct kvm_v
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;
  
 -              spin_lock(&irq->irq_lock);
 +              raw_spin_lock(&irq->irq_lock);
  
                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
                                vgic_irq_set_phys_active(irq, false);
                }
  
 -              spin_unlock(&irq->irq_lock);
 +              raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
  
@@@ -347,9 -347,9 +347,9 @@@ retry
  
        status = val & (1 << bit_nr);
  
 -      spin_lock_irqsave(&irq->irq_lock, flags);
 +      raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
 -              spin_unlock_irqrestore(&irq->irq_lock, flags);
 +              raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
@@@ -589,7 -589,7 +589,7 @@@ early_param("kvm-arm.vgic_v4_enable", e
   */
  int vgic_v3_probe(const struct gic_kvm_info *info)
  {
-       u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+       u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
        int ret;
  
        /*
@@@ -679,7 -679,7 +679,7 @@@ void vgic_v3_put(struct kvm_vcpu *vcpu
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
  
        if (likely(cpu_if->vgic_sre))
-               cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+               cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
  
        kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
  
diff --combined virt/kvm/kvm_main.c
@@@ -81,6 -81,11 +81,11 @@@ unsigned int halt_poll_ns_grow = 2
  module_param(halt_poll_ns_grow, uint, 0644);
  EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
  
+ /* The start value to grow halt_poll_ns from */
+ unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
+ module_param(halt_poll_ns_grow_start, uint, 0644);
+ EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
  /* Default resets per-vcpu halt_poll_ns . */
  unsigned int halt_poll_ns_shrink;
  module_param(halt_poll_ns_shrink, uint, 0644);
@@@ -525,7 -530,7 +530,7 @@@ static struct kvm_memslots *kvm_alloc_m
        int i;
        struct kvm_memslots *slots;
  
-       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
        if (!slots)
                return NULL;
  
@@@ -601,12 -606,12 +606,12 @@@ static int kvm_create_vm_debugfs(struc
  
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
-                                        GFP_KERNEL);
+                                        GFP_KERNEL_ACCOUNT);
        if (!kvm->debugfs_stat_data)
                return -ENOMEM;
  
        for (p = debugfs_entries; p->name; p++) {
-               stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+               stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
                if (!stat_data)
                        return -ENOMEM;
  
@@@ -656,12 -661,8 +661,8 @@@ static struct kvm *kvm_create_vm(unsign
                struct kvm_memslots *slots = kvm_alloc_memslots();
                if (!slots)
                        goto out_err_no_srcu;
-               /*
-                * Generations must be different for each address space.
-                * Init kvm generation close to the maximum to easily test the
-                * code of handling generation number wrap-around.
-                */
-               slots->generation = i * 2 - 150;
+               /* Generations must be different for each address space. */
+               slots->generation = i;
                rcu_assign_pointer(kvm->memslots[i], slots);
        }
  
                goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                rcu_assign_pointer(kvm->buses[i],
-                       kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
+                       kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
                if (!kvm->buses[i])
                        goto out_err;
        }
@@@ -789,7 -790,7 +790,7 @@@ static int kvm_create_dirty_bitmap(stru
  {
        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
  
-       memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
+       memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;
  
@@@ -874,31 -875,34 +875,34 @@@ static struct kvm_memslots *install_new
                int as_id, struct kvm_memslots *slots)
  {
        struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+       u64 gen = old_memslots->generation;
  
-       /*
-        * Set the low bit in the generation, which disables SPTE caching
-        * until the end of synchronize_srcu_expedited.
-        */
-       WARN_ON(old_memslots->generation & 1);
-       slots->generation = old_memslots->generation + 1;
+       WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+       slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
  
        rcu_assign_pointer(kvm->memslots[as_id], slots);
        synchronize_srcu_expedited(&kvm->srcu);
  
        /*
-        * Increment the new memslot generation a second time. This prevents
-        * vm exits that race with memslot updates from caching a memslot
-        * generation that will (potentially) be valid forever.
-        *
+        * Increment the new memslot generation a second time, dropping the
+        * update in-progress flag and incrementing the generation based on
+        * the number of address spaces.  This provides a unique and easily
+        * identifiable generation number while the memslots are in flux.
+        */
+       gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
+       /*
         * Generations must be unique even across address spaces.  We do not need
         * a global counter for that, instead the generation space is evenly split
         * across address spaces.  For example, with two address spaces, address
-        * space 0 will use generations 0, 4, 8, ... while * address space 1 will
-        * use generations 2, 6, 10, 14, ...
+        * space 0 will use generations 0, 2, 4, ... while address space 1 will
+        * use generations 1, 3, 5, ...
         */
-       slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+       gen += KVM_ADDRESS_SPACE_NUM;
+       kvm_arch_memslots_updated(kvm, gen);
  
-       kvm_arch_memslots_updated(kvm, slots);
+       slots->generation = gen;
  
        return old_memslots;
  }
@@@ -1018,7 -1022,7 +1022,7 @@@ int __kvm_set_memory_region(struct kvm 
                        goto out_free;
        }
  
-       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
        if (!slots)
                goto out_free;
        memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@@ -1201,11 -1205,9 +1205,9 @@@ int kvm_get_dirty_log_protect(struct kv
                        mask = xchg(&dirty_bitmap[i], 0);
                        dirty_bitmap_buffer[i] = mask;
  
-                       if (mask) {
-                               offset = i * BITS_PER_LONG;
-                               kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
-                                                                       offset, mask);
-                       }
+                       offset = i * BITS_PER_LONG;
+                       kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+                                                               offset, mask);
                }
                spin_unlock(&kvm->mmu_lock);
        }
@@@ -2185,20 -2187,23 +2187,23 @@@ void kvm_sigset_deactivate(struct kvm_v
  
  static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
  {
-       unsigned int old, val, grow;
+       unsigned int old, val, grow, grow_start;
  
        old = val = vcpu->halt_poll_ns;
+       grow_start = READ_ONCE(halt_poll_ns_grow_start);
        grow = READ_ONCE(halt_poll_ns_grow);
-       /* 10us base */
-       if (val == 0 && grow)
-               val = 10000;
-       else
-               val *= grow;
+       if (!grow)
+               goto out;
+       val *= grow;
+       if (val < grow_start)
+               val = grow_start;
  
        if (val > halt_poll_ns)
                val = halt_poll_ns;
  
        vcpu->halt_poll_ns = val;
+ out:
        trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
  }
  
@@@ -2683,7 -2688,7 +2688,7 @@@ static long kvm_vcpu_ioctl(struct file 
                struct kvm_regs *kvm_regs;
  
                r = -ENOMEM;
-               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@@ -2711,7 -2716,8 +2716,8 @@@ out_free1
                break;
        }
        case KVM_GET_SREGS: {
-               kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+               kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
+                                   GFP_KERNEL_ACCOUNT);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                break;
        }
        case KVM_GET_FPU: {
-               fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+               fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
@@@ -2980,7 -2986,7 +2986,7 @@@ static int kvm_ioctl_create_device(stru
        if (test)
                return 0;
  
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
        if (!dev)
                return -ENOMEM;
  
@@@ -3625,6 -3631,7 +3631,7 @@@ int kvm_io_bus_write(struct kvm_vcpu *v
        r = __kvm_io_bus_write(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
  }
+ EXPORT_SYMBOL_GPL(kvm_io_bus_write);
  
  /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
  int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
@@@ -3675,7 -3682,6 +3682,6 @@@ static int __kvm_io_bus_read(struct kvm
  
        return -EOPNOTSUPP;
  }
- EXPORT_SYMBOL_GPL(kvm_io_bus_write);
  
  /* kvm_io_bus_read - called under kvm->slots_lock */
  int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
        return r < 0 ? r : 0;
  }
  
  /* Caller must hold slots_lock. */
  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev)
        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;
  
-       new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
-                         sizeof(struct kvm_io_range)), GFP_KERNEL);
+       new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
+                         GFP_KERNEL_ACCOUNT);
        if (!new_bus)
                return -ENOMEM;
  
@@@ -3760,8 -3765,8 +3765,8 @@@ void kvm_io_bus_unregister_dev(struct k
        if (i == bus->dev_count)
                return;
  
-       new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
-                         sizeof(struct kvm_io_range)), GFP_KERNEL);
+       new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+                         GFP_KERNEL_ACCOUNT);
        if (!new_bus)  {
                pr_err("kvm: failed to shrink bus, removing it completely\n");
                goto broken;
@@@ -4029,7 -4034,7 +4034,7 @@@ static void kvm_uevent_notify_change(un
        active = kvm_active_vms;
        spin_unlock(&kvm_lock);
  
-       env = kzalloc(sizeof(*env), GFP_KERNEL);
+       env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
        if (!env)
                return;
  
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
  
 -      if (kvm->debugfs_dentry) {
 +      if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
-               char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
+               char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
  
                if (p) {
                        tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
@@@ -4084,7 -4089,7 +4089,7 @@@ static int kvm_suspend(void
  static void kvm_resume(void)
  {
        if (kvm_usage_count) {
 -              WARN_ON(raw_spin_is_locked(&kvm_count_lock));
 +              lockdep_assert_held(&kvm_count_lock);
                hardware_enable_nolock(NULL);
        }
  }