Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc...
authorDave Airlie <airlied@redhat.com>
Wed, 4 Nov 2020 00:55:11 +0000 (10:55 +1000)
committerDave Airlie <airlied@redhat.com>
Wed, 4 Nov 2020 01:49:10 +0000 (11:49 +1000)
drm-misc-next for 5.11:

UAPI Changes:

  - doc: rules for EBUSY on non-blocking commits; requirements for fourcc
    modifiers; on parsing EDID
  - fbdev/sbuslib: Remove unused FBIOSCURSOR32
  - fourcc: deprecate DRM_FORMAT_MOD_NONE
  - virtio: Support blob resources for memory allocations; Expose host-visible
    and cross-device features

Cross-subsystem Changes:

  - devicetree: Add vendor Prefix for Yes Optoelectronics, Shanghai Top Display
    Optoelectronics
  - dma-buf: Add struct dma_buf_map that stores DMA pointer and I/O-memory flag;
    dma_buf_vmap()/vunmap() return address in dma_buf_map; Use struct_size() macro

Core Changes:

  - atomic: pass full state to CRTC atomic enable/disable; warn for EBUSY during
    non-blocking commits
  - dp: Prepare for DP 2.0 DPCD
  - dp_mst: Receive extended DPCD caps
  - dma-buf: Documentation
  - doc: Format modifiers; dma-buf-map; Cleanups
  - fbdev: Don't use compat_alloc_user_space(); mark as orphaned
  - fb-helper: Take lock in drm_fb_helper_restore_work_fb()
  - gem: Convert implementation and drivers to GEM object functions, remove
    GEM callbacks from struct drm_driver (except gem_prime_mmap)
  - panel: Cleanups
  - pci: Add legacy infix to drm_irq_by_busid()
  - sched: Avoid infinite waits in drm_sched_entity_destroy()
  - switcheroo: Cleanups
  - ttm: Remove AGP support; Don't modify caching during swapout; Major
    refactoring of the implementation and API that affects all depending
    drivers; Add ttm_bo_wait_ctx(); Add ttm_bo_pin()/unpin() in favor of
    TTM_PL_FLAG_NO_EVICT; Remove ttm_bo_create(); Remove fault_reserve_notify()
    callback; Push move() implementation into drivers; Remove TTM_PAGE_FLAG_WRITE;
    Replace caching flags with init-time cache setting; Push ttm_tt_bind() into
    drivers; Replace move_notify() with delete_mem_notify(); No overlapping memcpy();
    no more ttm_set_populated()
  - vram-helper: Fix BO top-down placement; TTM-related changes; Init GEM
    object functions with defaults; Default placement in system memory; Cleanups

Driver Changes:

  - amdgpu: Use GEM object functions
  - armada: Use GEM object functions
  - aspeed: Configure output via sysfs; Init struct drm_driver with
    DRM_GEM_CMA_DRIVER_OPS
  - ast: Reload LUT after FB format changes
  - bridge: Add driver and DT bindings for anx7625; Cleanups
  - bridge/dw-hdmi: Constify ops
  - bridge/ti-sn65dsi86: Add retries for link training
  - bridge/lvds-codec: Add support for regulator
  - bridge/tc358768: Restore connector support; Cleanups
  - display/ti,j721e-dss: Add DT properties assigned-clocks, assigned-clocks-parent and
    dma-coherent
  - display/ti,am65s-dss: Add DT properties assigned-clocks, assigned-clocks-parent and
    dma-coherent
  - etnaviv: Use GEM object functions
  - exynos: Use GEM object functions
  - fbdev: Cleanups and compiler fixes throughout framebuffer drivers
  - fbdev/cirrusfb: Avoid division by 0
  - gma500: Use GEM object functions; Fix double-free of connector; Cleanups
  - hisilicon/hibmc: I2C-based DDC support; Use to_hibmc_drm_device(); Cleanups
  - i915: Use GEM object functions
  - imx/dcss: Init driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
  - ingenic: Reset pixel clock when parent clock changes; support reserved
    memory; Alloc F0 and F1 DMA channels at once; Support different pixel formats;
    Revert support for cached mmap buffers
    on F0/F1; support 30-bit/24-bit/8-bit-palette modes
  - komeda: Use DEFINE_SHOW_ATTRIBUTE
  - mcde: Detect platform_get_irq() errors
  - mediatek: Use GEM object functions
  - msm: Use GEM object functions
  - nouveau: Cleanups; TTM-related changes; Use GEM object functions
  - omapdrm: Use GEM object functions
  - panel: Add driver and DT bindings for Novatek nt36672a; Add driver and DT
    bindings for YTC700TLAG-05-201C; Add driver and DT bindings for TDO TL070WSH30;
    Cleanups
  - panel/mantix: Fix reset; Fix deref of NULL pointer in mantix_get_modes()
  - panel/otm8009a: Allow non-continuous dsi clock; Cleanups
  - panel/rm68200: Allow non-continuous dsi clock; Fix mode to 50 FPS
  - panfrost: Fix job timeout handling; Cleanups
  - pl111: Use GEM object functions
  - qxl: Cleanups; TTM-related changes; Pin new BOs with ttm_bo_init_reserved()
  - radeon: Cleanups; TTM-related changes; Use GEM object functions
  - rockchip: Use GEM object functions
  - shmobile: Cleanups
  - tegra: Use GEM object functions
  - tidss: Set drm_plane_helper_funcs.prepare_fb
  - tilcdc: Don't keep vblank interrupt enabled all the time
  - tve200: Detect platform_get_irq() errors
  - vc4: Use GEM object functions; Only register components once DSI is attached;
    Add Maxime as maintainer
  - vgem: Use GEM object functions
  - via: Simplify critical section in via_mem_alloc()
  - virtgpu: Use GEM object functions
  - virtio: Implement blob resources, host-visible and cross-device features;
    Support mapping of host-allocated resources; Use UUID API; Cleanups
  - vkms: Use GEM object functions; Switch to SHMEM
  - vmwgfx: TTM-related changes; Inline ttm_bo_swapout_all()
  - xen: Use GEM object functions
  - xlnx: Use GEM object functions

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201027100936.GA4858@linux-uq9g
55 files changed:
1  2 
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/dma-buf.rst
MAINTAINERS
drivers/dma-buf/dma-buf.c
drivers/dma-buf/heaps/heap-helpers.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/misc/fastrpc.c
include/drm/drm_dp_helper.h
include/drm/drm_prime.h

@@@ -179,8 -179,6 +179,8 @@@ patternProperties
      description: CALAO Systems SAS
    "^calxeda,.*":
      description: Calxeda
 +  "^caninos,.*":
 +    description: Caninos Loucos Program
    "^capella,.*":
      description: Capella Microsystems, Inc
    "^cascoda,.*":
      description: Denx Software Engineering
    "^devantech,.*":
      description: Devantech, Ltd.
 +  "^dfi,.*":
 +    description: DFI Inc.
    "^dh,.*":
      description: DH electronics GmbH
    "^difrnce,.*":
      description: Gumstix, Inc.
    "^gw,.*":
      description: Gateworks Corporation
 +                 use "gateworks" vendor prefix
 +    deprecated: true
    "^hannstar,.*":
      description: HannStar Display Corporation
    "^haoyu,.*":
      description: MEMSIC Inc.
    "^menlo,.*":
      description: Menlo Systems GmbH
 +  "^meraki,.*":
 +    description: Cisco Meraki, LLC
    "^merrii,.*":
      description: Merrii Technology Co., Ltd.
    "^micrel,.*":
      description: Realtek Semiconductor Corp.
    "^renesas,.*":
      description: Renesas Electronics Corporation
 +  "^rex,.*":
 +    description: iMX6 Rex Project
    "^rervision,.*":
      description: Shenzhen Rervision Technology Co., Ltd.
    "^richtek,.*":
      description: Rikomagic Tech Corp. Ltd
    "^riscv,.*":
      description: RISC-V Foundation
 +  "^riot,.*":
 +    description: Embest RIoT
    "^rockchip,.*":
      description: Fuzhou Rockchip Electronics Co., Ltd
    "^rocktech,.*":
      description: Ronbo Electronics
    "^roofull,.*":
      description: Shenzhen Roofull Technology Co, Ltd
 +  "^roseapplepi,.*":
 +    description: RoseapplePi.org
    "^samsung,.*":
      description: Samsung Semiconductor
    "^samtec,.*":
      description: Schindler
    "^seagate,.*":
      description: Seagate Technology PLC
 +  "^seeed,.*":
 +    description: Seeed Technology Co., Ltd
    "^seirobotics,.*":
      description: Shenzhen SEI Robotics Co., Ltd
    "^semtech,.*":
      description: Trusted Computing Group
    "^tcl,.*":
      description: Toby Churchill Ltd.
+   "^tdo,.*":
+     description: Shangai Top Display Optoelectronics Co., Ltd
    "^technexion,.*":
      description: TechNexion
    "^technologic,.*":
      description: Vision Optical Technology Co., Ltd.
    "^vxt,.*":
      description: VXT Ltd
 +  "^wand,.*":
 +    description: Wandbord (Technexion)
    "^waveshare,.*":
      description: Waveshare Electronics
    "^wd,.*":
      description: Shenzhen Xingbangda Display Technology Co., Ltd
    "^xinpeng,.*":
      description: Shenzhen Xinpeng Technology Co., Ltd
 +  "^xiphera,.*":
 +    description: Xiphera Ltd.
    "^xlnx,.*":
      description: Xilinx
    "^xnano,.*":
      description: Shenzhen Xunlong Software CO.,Limited
    "^xylon,.*":
      description: Xylon
+   "^yes-optoelectronics,.*":
+     description: Yes Optoelectronics Co.,Ltd.
    "^ylm,.*":
      description: Shenzhen Yangliming Electronic Technology Co., Ltd.
    "^yna,.*":
      description: Yones Toptech Co., Ltd.
    "^ysoft,.*":
      description: Y Soft Corporation a.s.
 +  "^zealz,.*":
 +    description: Zealz
    "^zarlink,.*":
      description: Zarlink Semiconductor
    "^zeitec,.*":
      description: Shenzhen Zidoo Technology Co., Ltd.
    "^zii,.*":
      description: Zodiac Inflight Innovations
 +  "^zinitix,.*":
 +    description: Zinitix Co., Ltd
 +  "^zkmagic,.*":
 +    description: Shenzhen Zkmagic Technology Co., Ltd.
    "^zte,.*":
      description: ZTE Corp.
    "^zyxel,.*":
@@@ -85,7 -85,7 +85,7 @@@ consider though
  - Memory mapping the contents of the DMA buffer is also supported. See the
    discussion below on `CPU Access to DMA Buffer Objects`_ for the full details.
  
 -- The DMA buffer FD is also pollable, see `Fence Poll Support`_ below for
 +- The DMA buffer FD is also pollable, see `Implicit Fence Poll Support`_ below for
    details.
  
  Basic Operation and Device DMA Access
@@@ -115,6 -115,15 +115,15 @@@ Kernel Functions and Structures Referen
  .. kernel-doc:: include/linux/dma-buf.h
     :internal:
  
+ Buffer Mapping Helpers
+ ~~~~~~~~~~~~~~~~~~~~~~
+ .. kernel-doc:: include/linux/dma-buf-map.h
+    :doc: overview
+ .. kernel-doc:: include/linux/dma-buf-map.h
+    :internal:
  Reservation Objects
  -------------------
  
diff --combined MAINTAINERS
@@@ -405,7 -405,7 +405,7 @@@ F: drivers/platform/x86/i2c-multi-insta
  ACPI PMIC DRIVERS
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  M:    Len Brown <lenb@kernel.org>
 -R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 +R:    Andy Shevchenko <andy@kernel.org>
  R:    Mika Westerberg <mika.westerberg@linux.intel.com>
  L:    linux-acpi@vger.kernel.org
  S:    Supported
@@@ -802,13 -802,6 +802,13 @@@ S:       Maintaine
  F:    Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
  F:    drivers/irqchip/irq-al-fic.c
  
 +AMAZON ANNAPURNA LABS MEMORY CONTROLLER EDAC
 +M:    Talel Shenhar <talel@amazon.com>
 +M:    Talel Shenhar <talelshenhar@gmail.com>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml
 +F:    drivers/edac/al_mc_edac.c
 +
  AMAZON ANNAPURNA LABS THERMAL MMIO DRIVER
  M:    Talel Shenhar <talel@amazon.com>
  S:    Maintained
@@@ -950,12 -943,37 +950,12 @@@ S:      Supporte
  F:    arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
  F:    drivers/net/ethernet/amd/xgbe/
  
 -ANALOG DEVICES INC AD5686 DRIVER
 -M:    Michael Hennerich <Michael.Hennerich@analog.com>
 -L:    linux-pm@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    drivers/iio/dac/ad5686*
 -F:    drivers/iio/dac/ad5696*
 -
 -ANALOG DEVICES INC AD5758 DRIVER
 -M:    Michael Hennerich <Michael.Hennerich@analog.com>
 -L:    linux-iio@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/dac/ad5758.txt
 -F:    drivers/iio/dac/ad5758.c
 -
 -ANALOG DEVICES INC AD7091R5 DRIVER
 -M:    Beniamin Bia <beniamin.bia@analog.com>
 -L:    linux-iio@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
 -F:    drivers/iio/adc/ad7091r5.c
 -
 -ANALOG DEVICES INC AD7124 DRIVER
 -M:    Michael Hennerich <Michael.Hennerich@analog.com>
 +AMS AS73211 DRIVER
 +M:    Christian Eggers <ceggers@arri.de>
  L:    linux-iio@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
 -F:    drivers/iio/adc/ad7124.c
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/light/ams,as73211.yaml
 +F:    drivers/iio/light/as73211.c
  
  ANALOG DEVICES INC AD7192 DRIVER
  M:    Alexandru Tachici <alexandru.tachici@analog.com>
@@@ -973,6 -991,15 +973,6 @@@ W:        http://ez.analog.com/community/linux
  F:    Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
  F:    drivers/iio/adc/ad7292.c
  
 -ANALOG DEVICES INC AD7606 DRIVER
 -M:    Michael Hennerich <Michael.Hennerich@analog.com>
 -M:    Beniamin Bia <beniamin.bia@analog.com>
 -L:    linux-iio@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
 -F:    drivers/iio/adc/ad7606.c
 -
  ANALOG DEVICES INC AD7768-1 DRIVER
  M:    Michael Hennerich <Michael.Hennerich@analog.com>
  L:    linux-iio@vger.kernel.org
@@@ -1034,6 -1061,7 +1034,6 @@@ F:      drivers/iio/imu/adis16475.
  F:    Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
  
  ANALOG DEVICES INC ADM1177 DRIVER
 -M:    Beniamin Bia <beniamin.bia@analog.com>
  M:    Michael Hennerich <Michael.Hennerich@analog.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
@@@ -1080,13 -1108,6 +1080,13 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/i2c/adv7842*
  
 +ANALOG DEVICES INC ADXRS290 DRIVER
 +M:    Nishant Malpani <nish.malpani25@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Supported
 +F:    drivers/iio/gyro/adxrs290.c
 +F:    Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml
 +
  ANALOG DEVICES INC ASOC CODEC DRIVERS
  M:    Lars-Peter Clausen <lars@metafoo.de>
  M:    Nuno Sá <nuno.sa@analog.com>
@@@ -1107,6 -1128,15 +1107,6 @@@ S:     Supporte
  W:    http://ez.analog.com/community/linux-device-drivers
  F:    drivers/dma/dma-axi-dmac.c
  
 -ANALOG DEVICES INC HMC425A DRIVER
 -M:    Beniamin Bia <beniamin.bia@analog.com>
 -M:    Michael Hennerich <michael.hennerich@analog.com>
 -L:    linux-iio@vger.kernel.org
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
 -F:    drivers/iio/amplifiers/hmc425a.c
 -
  ANALOG DEVICES INC IIO DRIVERS
  M:    Lars-Peter Clausen <lars@metafoo.de>
  M:    Michael Hennerich <Michael.Hennerich@analog.com>
@@@ -1115,11 -1145,8 +1115,11 @@@ W:    http://wiki.analog.com
  W:    http://ez.analog.com/community/linux-device-drivers
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
 +F:    Documentation/devicetree/bindings/iio/*/adi,*
 +F:    Documentation/devicetree/bindings/iio/dac/ad5758.txt
  F:    drivers/iio/*/ad*
  F:    drivers/iio/adc/ltc249*
 +F:    drivers/iio/amplifiers/hmc425a.c
  F:    drivers/staging/iio/*/ad*
  X:    drivers/iio/*/adjd*
  
@@@ -1259,7 -1286,7 +1259,7 @@@ S:      Supporte
  F:    Documentation/devicetree/bindings/net/apm-xgene-enet.txt
  F:    Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
  F:    drivers/net/ethernet/apm/xgene/
 -F:    drivers/net/phy/mdio-xgene.c
 +F:    drivers/net/mdio/mdio-xgene.c
  
  APPLIED MICRO (APM) X-GENE SOC PMU
  M:    Khuong Dinh <khuong@os.amperecomputing.com>
@@@ -1433,11 -1460,6 +1433,11 @@@ S:    Odd Fixe
  F:    drivers/amba/
  F:    include/linux/amba/bus.h
  
 +ARM PRIMECELL CLCD PL110 DRIVER
 +M:    Russell King <linux@armlinux.org.uk>
 +S:    Odd Fixes
 +F:    drivers/video/fbdev/amba-clcd.*
 +
  ARM PRIMECELL KMI PL050 DRIVER
  M:    Russell King <linux@armlinux.org.uk>
  S:    Odd Fixes
@@@ -1484,7 -1506,8 +1484,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    Documentation/devicetree/bindings/iommu/arm,smmu*
  F:    drivers/iommu/arm/
 -F:    drivers/iommu/io-pgtable-arm-v7s.c
 -F:    drivers/iommu/io-pgtable-arm.c
 +F:    drivers/iommu/io-pgtable-arm*
  
  ARM SUB-ARCHITECTURES
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1500,11 -1523,10 +1500,11 @@@ L:   linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    Documentation/devicetree/bindings/arm/actions.yaml
  F:    Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
 -F:    Documentation/devicetree/bindings/dma/owl-dma.txt
 +F:    Documentation/devicetree/bindings/dma/owl-dma.yaml
  F:    Documentation/devicetree/bindings/i2c/i2c-owl.txt
 +F:    Documentation/devicetree/bindings/interrupt-controller/actions,owl-sirq.yaml
  F:    Documentation/devicetree/bindings/mmc/owl-mmc.yaml
 -F:    Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
 +F:    Documentation/devicetree/bindings/pinctrl/actions,*
  F:    Documentation/devicetree/bindings/power/actions,owl-sps.txt
  F:    Documentation/devicetree/bindings/timer/actions,owl-timer.txt
  F:    arch/arm/boot/dts/owl-*
@@@ -1514,7 -1536,6 +1514,7 @@@ F:      drivers/clk/actions
  F:    drivers/clocksource/timer-owl*
  F:    drivers/dma/owl-dma.c
  F:    drivers/i2c/busses/i2c-owl.c
 +F:    drivers/irqchip/irq-owl-sirq.c
  F:    drivers/mmc/host/owl-mmc.c
  F:    drivers/pinctrl/actions/*
  F:    drivers/soc/actions/
@@@ -1602,7 -1623,7 +1602,7 @@@ N:      meso
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
  M:    Tsahee Zidenberg <tsahee@annapurnalabs.com>
 -M:    Antoine Tenart <antoine.tenart@bootlin.com>
 +M:    Antoine Tenart <atenart@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/boot/dts/alpine*
@@@ -1725,7 -1746,6 +1725,7 @@@ ARM/CORESIGHT FRAMEWORK AND DRIVER
  M:    Mathieu Poirier <mathieu.poirier@linaro.org>
  R:    Suzuki K Poulose <suzuki.poulose@arm.com>
  R:    Mike Leach <mike.leach@linaro.org>
 +L:    coresight@lists.linaro.org (moderated for non-subscribers)
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@@ -2115,7 -2135,6 +2115,7 @@@ M:      Steen Hegelund <Steen.Hegelund@micro
  M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Supported
 +T:    git git://github.com/microchip-ung/linux-upstream.git
  F:    arch/arm64/boot/dts/microchip/
  N:    sparx5
  
@@@ -2131,7 -2150,9 +2131,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  S:    Maintained
  W:    http://linux-chenxing.org/
  F:    Documentation/devicetree/bindings/arm/mstar/*
 -F:    arch/arm/boot/dts/infinity*.dtsi
 -F:    arch/arm/boot/dts/mercury*.dtsi
 -F:    arch/arm/boot/dts/mstar-v7.dtsi
 +F:    arch/arm/boot/dts/mstar-*
  F:    arch/arm/mach-mstar/
  
  ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
@@@ -2198,8 -2219,8 +2198,8 @@@ ARM/OPENMOKO NEO FREERUNNER (GTA02) MAC
  L:    openmoko-kernel@lists.openmoko.org (subscribers-only)
  S:    Orphan
  W:    http://wiki.openmoko.org/wiki/Neo_FreeRunner
 -F:    arch/arm/mach-s3c24xx/gta02.h
 -F:    arch/arm/mach-s3c24xx/mach-gta02.c
 +F:    arch/arm/mach-s3c/gta02.h
 +F:    arch/arm/mach-s3c/mach-gta02.c
  
  ARM/Orion SoC/Technologic Systems TS-78xx platform support
  M:    Alexander Clouter <alex@digriz.org.uk>
@@@ -2375,9 -2396,10 +2375,9 @@@ F:     sound/soc/rockchip
  N:    rockchip
  
  ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 -M:    Kukjin Kim <kgene@kernel.org>
  M:    Krzysztof Kozlowski <krzk@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
  Q:    https://patchwork.kernel.org/project/linux-samsung-soc/list/
  F:    Documentation/arm/samsung/
@@@ -2387,8 -2409,10 +2387,8 @@@ F:     arch/arm/boot/dts/exynos
  F:    arch/arm/boot/dts/s3c*
  F:    arch/arm/boot/dts/s5p*
  F:    arch/arm/mach-exynos*/
 -F:    arch/arm/mach-s3c24*/
 -F:    arch/arm/mach-s3c64xx/
 +F:    arch/arm/mach-s3c/
  F:    arch/arm/mach-s5p*/
 -F:    arch/arm/plat-samsung/
  F:    arch/arm64/boot/dts/exynos/
  F:    drivers/*/*/*s3c24*
  F:    drivers/*/*s3c24*
@@@ -2399,9 -2423,6 +2399,9 @@@ F:      drivers/soc/samsung
  F:    drivers/tty/serial/samsung*
  F:    include/linux/soc/samsung/
  N:    exynos
 +N:    s3c2410
 +N:    s3c64xx
 +N:    s5pv210
  
  ARM/SAMSUNG MOBILE MACHINE SUPPORT
  M:    Kyungmin Park <kyungmin.park@samsung.com>
@@@ -2420,11 -2441,11 +2420,11 @@@ F:   drivers/media/platform/s5p-g2d
  
  ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
  M:    Marek Szyprowski <m.szyprowski@samsung.com>
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/s5p-cec.txt
 -F:    drivers/media/platform/s5p-cec/
 +F:    drivers/media/cec/platform/s5p/
  
  ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
  M:    Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
@@@ -2483,7 -2504,7 +2483,7 @@@ S:      Maintaine
  F:    drivers/clk/socfpga/
  
  ARM/SOCFPGA EDAC SUPPORT
 -M:    Thor Thayer <thor.thayer@linux.intel.com>
 +M:    Dinh Nguyen <dinguyen@kernel.org>
  S:    Maintained
  F:    drivers/edac/altera_edac.
  
@@@ -2569,7 -2590,7 +2569,7 @@@ L:      linux-tegra@vger.kernel.or
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/tegra-cec.txt
 -F:    drivers/media/platform/tegra-cec/
 +F:    drivers/media/cec/platform/tegra/
  
  ARM/TETON BGA MACHINE SUPPORT
  M:    "Mark F. Brown" <mark.brown314@gmail.com>
@@@ -2614,7 -2635,7 +2614,7 @@@ M:      Tero Kristo <t-kristo@ti.com
  M:    Nishanth Menon <nm@ti.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Supported
 -F:    Documentation/devicetree/bindings/arm/ti/k3.txt
 +F:    Documentation/devicetree/bindings/arm/ti/k3.yaml
  F:    arch/arm64/boot/dts/ti/Makefile
  F:    arch/arm64/boot/dts/ti/k3-*
  F:    include/dt-bindings/pinctrl/k3.h
@@@ -2629,20 -2650,11 +2629,20 @@@ M:   Dmitry Eremin-Solenikov <dbaryshkov@
  M:    Dirk Opfer <dirk@opfer-online.de>
  S:    Maintained
  
 +ARM/TOSHIBA VISCONTI ARCHITECTURE
 +M:    Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Supported
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/iwamatsu/linux-visconti.git
 +F:    Documentation/devicetree/bindings/arm/toshiba.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/toshiba,tmpv7700-pinctrl.yaml
 +F:    arch/arm64/boot/dts/toshiba/
 +F:    drivers/pinctrl/visconti/
 +N:    visconti
 +
  ARM/UNIPHIER ARCHITECTURE
 -M:    Masahiro Yamada <yamada.masahiro@socionext.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -S:    Maintained
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
 +S:    Orphan
  F:    Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
  F:    Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
  F:    Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
@@@ -3246,14 -3258,13 +3246,14 @@@ M:   Daniel Borkmann <daniel@iogearbox.ne
  R:    Martin KaFai Lau <kafai@fb.com>
  R:    Song Liu <songliubraving@fb.com>
  R:    Yonghong Song <yhs@fb.com>
 -R:    Andrii Nakryiko <andriin@fb.com>
 +R:    Andrii Nakryiko <andrii@kernel.org>
  R:    John Fastabend <john.fastabend@gmail.com>
  R:    KP Singh <kpsingh@chromium.org>
  L:    netdev@vger.kernel.org
  L:    bpf@vger.kernel.org
  S:    Supported
 -Q:    https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
 +W:    https://bpf.io/
 +Q:    https://patchwork.kernel.org/project/netdevbpf/list/?delegate=121173
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
  F:    Documentation/bpf/
@@@ -3423,7 -3434,7 +3423,7 @@@ M:      bcm-kernel-feedback-list@broadcom.co
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
  F:    arch/arm/boot/dts/bcm470*
 -F:    arch/arm/boot/dts/bcm5301x*.dtsi
 +F:    arch/arm/boot/dts/bcm5301*
  F:    arch/arm/boot/dts/bcm953012*
  F:    arch/arm/mach-bcm/bcm_5301x.c
  
@@@ -3464,14 -3475,6 +3464,14 @@@ F:    drivers/bus/brcmstb_gisb.
  F:    drivers/pci/controller/pcie-brcmstb.c
  N:    brcmstb
  
 +BROADCOM BDC DRIVER
 +M:    Al Cooper <alcooperx@gmail.com>
 +L:    linux-usb@vger.kernel.org
 +L:    bcm-kernel-feedback-list@broadcom.com
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/usb/brcm,bdc.txt
 +F:    drivers/usb/gadget/udc/bdc/
 +
  BROADCOM BMIPS CPUFREQ DRIVER
  M:    Markus Mayer <mmayer@broadcom.com>
  M:    bcm-kernel-feedback-list@broadcom.com
@@@ -3489,7 -3492,6 +3489,7 @@@ F:      arch/mips/bmips/
  F:    arch/mips/boot/dts/brcm/bcm*.dts*
  F:    arch/mips/include/asm/mach-bmips/*
  F:    arch/mips/kernel/*bmips*
 +F:    drivers/soc/bcm/bcm63xx
  F:    drivers/irqchip/irq-bcm63*
  F:    drivers/irqchip/irq-bcm7*
  F:    drivers/irqchip/irq-brcmstb*
@@@ -3505,17 -3507,13 +3505,17 @@@ F:   drivers/net/ethernet/broadcom/bnx2.
  F:    drivers/net/ethernet/broadcom/bnx2_*
  
  BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
 -M:    QLogic-Storage-Upstream@qlogic.com
 +M:    Saurav Kashyap <skashyap@marvell.com>
 +M:    Javed Hasan <jhasan@marvell.com>
 +M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/bnx2fc/
  
  BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
 -M:    QLogic-Storage-Upstream@qlogic.com
 +M:    Nilesh Javali <njavali@marvell.com>
 +M:    Manish Rangankar <mrangankar@marvell.com>
 +M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/bnx2i/
@@@ -3850,16 -3848,6 +3850,16 @@@ S:    Orpha
  F:    Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
  F:    drivers/mtd/nand/raw/cadence-nand-controller.c
  
 +CADENCE USB3 DRD IP DRIVER
 +M:    Peter Chen <peter.chen@nxp.com>
 +M:    Pawel Laszczak <pawell@cadence.com>
 +M:    Roger Quadros <rogerq@ti.com>
 +L:    linux-usb@vger.kernel.org
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
 +F:    Documentation/devicetree/bindings/usb/cdns-usb3.txt
 +F:    drivers/usb/cdns3/
 +
  CADET FM/AM RADIO RECEIVER DRIVER
  M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
@@@ -3924,7 -3912,6 +3924,7 @@@ F:      include/net/netns/can.
  F:    include/uapi/linux/can.h
  F:    include/uapi/linux/can/bcm.h
  F:    include/uapi/linux/can/gw.h
 +F:    include/uapi/linux/can/isotp.h
  F:    include/uapi/linux/can/raw.h
  F:    net/can/
  
@@@ -4043,7 -4030,7 +4043,7 @@@ S:      Supporte
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
  F:    Documentation/devicetree/bindings/media/cec-gpio.txt
 -F:    drivers/media/platform/cec-gpio/
 +F:    drivers/media/cec/platform/cec-gpio/
  
  CELL BROADBAND ENGINE ARCHITECTURE
  M:    Arnd Bergmann <arnd@arndb.de>
@@@ -4113,11 -4100,6 +4113,11 @@@ T:    git git://git.kernel.org/pub/scm/lin
  F:    drivers/char/
  F:    drivers/misc/
  F:    include/linux/miscdevice.h
 +X:    drivers/char/agp/
 +X:    drivers/char/hw_random/
 +X:    drivers/char/ipmi/
 +X:    drivers/char/random.c
 +X:    drivers/char/tpm/
  
  CHECKPATCH
  M:    Andy Whitcroft <apw@canonical.com>
@@@ -4188,7 -4170,6 +4188,7 @@@ CIRRUS LOGIC AUDIO CODEC DRIVER
  M:    James Schulman <james.schulman@cirrus.com>
  M:    David Rhodes <david.rhodes@cirrus.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +L:    patches@opensource.cirrus.com
  S:    Maintained
  F:    sound/soc/codecs/cs*
  
@@@ -4266,6 -4247,7 +4266,6 @@@ F:      drivers/net/ethernet/cisco/enic
  CISCO VIC LOW LATENCY NIC DRIVER
  M:    Christian Benvenuti <benve@cisco.com>
  M:    Nelson Escobar <neescoba@cisco.com>
 -M:    Parvi Kaustubhi <pkaustub@cisco.com>
  S:    Supported
  F:    drivers/infiniband/hw/usnic/
  
@@@ -4283,7 -4265,6 +4283,7 @@@ W:      https://clangbuiltlinux.github.io
  B:    https://github.com/ClangBuiltLinux/linux/issues
  C:    irc://chat.freenode.net/clangbuiltlinux
  F:    Documentation/kbuild/llvm.rst
 +F:    scripts/clang-tools/
  K:    \b(?i:clang|llvm)\b
  
  CLEANCACHE API
@@@ -4426,7 -4407,12 +4426,7 @@@ S:     Supporte
  T:    git git://git.infradead.org/users/hch/configfs.git
  F:    fs/configfs/
  F:    include/linux/configfs.h
 -
 -CONNECTOR
 -M:    Evgeniy Polyakov <zbr@ioremap.net>
 -L:    netdev@vger.kernel.org
 -S:    Maintained
 -F:    drivers/connector/
 +F:    samples/configfs/
  
  CONSOLE SUBSYSTEM
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@@ -4594,14 -4580,6 +4594,14 @@@ L:    linux-arm-kernel@lists.infradead.or
  S:    Supported
  F:    drivers/cpuidle/cpuidle-psci.c
  
 +CPUIDLE DRIVER - ARM PSCI PM DOMAIN
 +M:    Ulf Hansson <ulf.hansson@linaro.org>
 +L:    linux-pm@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Supported
 +F:    drivers/cpuidle/cpuidle-psci.h
 +F:    drivers/cpuidle/cpuidle-psci-domain.c
 +
  CRAMFS FILESYSTEM
  M:    Nicolas Pitre <nico@fluxnic.net>
  S:    Maintained
@@@ -4732,15 -4710,6 +4732,15 @@@ S:    Supporte
  W:    http://www.chelsio.com
  F:    drivers/crypto/chelsio
  
 +CXGB4 INLINE CRYPTO DRIVER
 +M:    Ayush Sawal <ayush.sawal@chelsio.com>
 +M:    Vinay Kumar Yadav <vinay.yadav@chelsio.com>
 +M:    Rohit Maheshwari <rohitm@chelsio.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +W:    http://www.chelsio.com
 +F:    drivers/net/ethernet/chelsio/inline_crypto/
 +
  CXGB4 ETHERNET DRIVER (CXGB4)
  M:    Vishal Kulkarni <vishal@chelsio.com>
  L:    netdev@vger.kernel.org
@@@ -5003,8 -4972,9 +5003,8 @@@ T:      git git://linuxtv.org/media_tree.gi
  F:    drivers/media/platform/sti/delta
  
  DENALI NAND DRIVER
 -M:    Masahiro Yamada <yamada.masahiro@socionext.com>
  L:    linux-mtd@lists.infradead.org
 -S:    Supported
 +S:    Orphan
  F:    drivers/mtd/nand/raw/denali*
  
  DESIGNWARE EDMA CORE IP DRIVER
@@@ -5042,12 -5012,6 +5042,12 @@@ S:    Maintaine
  F:    drivers/base/devcoredump.c
  F:    include/linux/devcoredump.h
  
 +DEVICE DEPENDENCY HELPER SCRIPT
 +M:    Saravana Kannan <saravanak@google.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    scripts/dev-needs.sh
 +
  DEVICE DIRECT ACCESS (DAX)
  M:    Dan Williams <dan.j.williams@intel.com>
  M:    Vishal Verma <vishal.l.verma@intel.com>
@@@ -5244,11 -5208,12 +5244,11 @@@ T:   git git://git.infradead.org/users/hc
  F:    include/asm-generic/dma-mapping.h
  F:    include/linux/dma-direct.h
  F:    include/linux/dma-mapping.h
 -F:    include/linux/dma-noncoherent.h
 +F:    include/linux/dma-map-ops.h
  F:    kernel/dma/
  
  DMA-BUF HEAPS FRAMEWORK
  M:    Sumit Semwal <sumit.semwal@linaro.org>
 -R:    Andrew F. Davis <afd@ti.com>
  R:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
  R:    Liam Mark <lmark@codeaurora.org>
  R:    Laura Abbott <labbott@redhat.com>
@@@ -5414,11 -5379,12 +5414,11 @@@ F:   include/linux/debugfs.
  F:    include/linux/kobj*
  F:    lib/kobj*
  
 -DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
 -M:    Kevin Hilman <khilman@kernel.org>
 +DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS)
  M:    Nishanth Menon <nm@ti.com>
  L:    linux-pm@vger.kernel.org
  S:    Maintained
 -F:    drivers/power/avs/
 +F:    drivers/soc/ti/smartreflex.c
  F:    include/linux/power/smartreflex.h
  
  DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
@@@ -5576,6 -5542,13 +5576,13 @@@ T:    git git://anongit.freedesktop.org/dr
  F:    Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
  F:    drivers/gpu/drm/panel/panel-novatek-nt35510.c
  
+ DRM DRIVER FOR NOVATEK NT36672A PANELS
+ M:    Sumit Semwal <sumit.semwal@linaro.org>
+ S:    Maintained
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ F:    Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+ F:    drivers/gpu/drm/panel/panel-novatek-nt36672a.c
  DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
  M:    Ben Skeggs <bskeggs@redhat.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -5628,13 -5601,12 +5635,13 @@@ S:   Maintaine
  F:    Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
  F:    drivers/gpu/drm/panel/panel-raydium-rm67191.c
  
 -DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
 +DRM DRIVER FOR SITRONIX ST7703 PANELS
  M:    Guido Günther <agx@sigxcpu.org>
  R:    Purism Kernel Team <kernel@puri.sm>
 +R:    Ondrej Jirman <megous@megous.com>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
 -F:    drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
 +F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
 +F:    drivers/gpu/drm/panel/panel-sitronix-st7703.c
  
  DRM DRIVER FOR SAVAGE VIDEO CARDS
  S:    Orphan / Obsolete
@@@ -5862,7 -5834,6 +5869,7 @@@ L:      dri-devel@lists.freedesktop.or
  S:    Supported
  F:    Documentation/devicetree/bindings/display/mediatek/
  F:    drivers/gpu/drm/mediatek/
 +F:    drivers/phy/mediatek/phy-mtk-hdmi*
  
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
@@@ -5884,7 -5855,7 +5891,7 @@@ L:      linux-renesas-soc@vger.kernel.or
  S:    Supported
  T:    git git://linuxtv.org/pinchartl/media drm/du/next
  F:    Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
 -F:    Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
 +F:    Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
  F:    Documentation/devicetree/bindings/display/renesas,du.txt
  F:    drivers/gpu/drm/rcar-du/
  F:    drivers/gpu/drm/shmobile/
@@@ -5955,6 -5926,7 +5962,7 @@@ F:      include/uapi/drm/v3d_drm.
  
  DRM DRIVERS FOR VC4
  M:    Eric Anholt <eric@anholt.net>
+ M:    Maxime Ripard <mripard@kernel.org>
  S:    Supported
  T:    git git://github.com/anholt/linux
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -6231,12 -6203,12 +6239,12 @@@ F:   Documentation/devicetree/bindings/ed
  F:    drivers/edac/aspeed_edac.c
  
  EDAC-BLUEFIELD
 -M:    Shravan Kumar Ramani <sramani@nvidia.com>
 +M:    Shravan Kumar Ramani <shravankr@nvidia.com>
  S:    Supported
  F:    drivers/edac/bluefield_edac.c
  
  EDAC-CALXEDA
 -M:    Robert Richter <rric@kernel.org>
 +M:    Andre Przywara <andre.przywara@arm.com>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
  F:    drivers/edac/highbank*
@@@ -6572,14 -6544,11 +6580,14 @@@ F:   Documentation/devicetree/bindings/ne
  F:    Documentation/devicetree/bindings/net/mdio*
  F:    Documentation/devicetree/bindings/net/qca,ar803x.yaml
  F:    Documentation/networking/phy.rst
 +F:    drivers/net/mdio/
 +F:    drivers/net/mdio/of_mdio.c
 +F:    drivers/net/pcs/
  F:    drivers/net/phy/
 -F:    drivers/of/of_mdio.c
  F:    drivers/of/of_net.c
  F:    include/dt-bindings/net/qca-ar803x.h
  F:    include/linux/*mdio*.h
 +F:    include/linux/mdio/*.h
  F:    include/linux/of_net.h
  F:    include/linux/phy.h
  F:    include/linux/phy_fixed.h
@@@ -6655,7 -6624,6 +6663,7 @@@ F:      fs/proc/bootconfig.
  F:    include/linux/bootconfig.h
  F:    lib/bootconfig.c
  F:    tools/bootconfig/*
 +F:    tools/bootconfig/scripts/*
  
  EXYNOS DP DRIVER
  M:    Jingoo Han <jingoohan1@gmail.com>
@@@ -6669,6 -6637,13 +6677,6 @@@ L:     iommu@lists.linux-foundation.or
  S:    Maintained
  F:    drivers/iommu/exynos-iommu.c
  
 -EZchip NPS platform support
 -M:    Vineet Gupta <vgupta@synopsys.com>
 -M:    Ofer Levi <oferle@nvidia.com>
 -S:    Supported
 -F:    arch/arc/boot/dts/eznps.dts
 -F:    arch/arc/plat-eznps
 -
  F2FS FILE SYSTEM
  M:    Jaegeuk Kim <jaegeuk@kernel.org>
  M:    Chao Yu <yuchao0@huawei.com>
@@@ -6876,17 -6851,14 +6884,17 @@@ F:   drivers/net/ethernet/nvidia/
  
  FPGA DFL DRIVERS
  M:    Wu Hao <hao.wu@intel.com>
 +R:    Tom Rix <trix@redhat.com>
  L:    linux-fpga@vger.kernel.org
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-dfl
  F:    Documentation/fpga/dfl.rst
  F:    drivers/fpga/dfl*
  F:    include/uapi/linux/fpga-dfl.h
  
  FPGA MANAGER FRAMEWORK
  M:    Moritz Fischer <mdf@kernel.org>
 +R:    Tom Rix <trix@redhat.com>
  L:    linux-fpga@vger.kernel.org
  S:    Maintained
  W:    http://www.rocketboards.org
@@@ -6911,10 -6883,9 +6919,9 @@@ F:     drivers/net/wan/dlci.
  F:    drivers/net/wan/sdla.c
  
  FRAMEBUFFER LAYER
- M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    dri-devel@lists.freedesktop.org
  L:    linux-fbdev@vger.kernel.org
- S:    Maintained
+ S:    Orphan
  Q:    http://patchwork.kernel.org/project/linux-fbdev/list/
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    Documentation/fb/
@@@ -7009,7 -6980,7 +7016,7 @@@ M:      Frank Li <Frank.li@nxp.com
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
  F:    Documentation/admin-guide/perf/imx-ddr.rst
 -F:    Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
 +F:    Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
  F:    drivers/perf/fsl_imx8_ddr_perf.c
  
  FREESCALE IMX I2C DRIVER
@@@ -7017,7 -6988,7 +7024,7 @@@ M:      Oleksij Rempel <o.rempel@pengutronix
  R:    Pengutronix Kernel Team <kernel@pengutronix.de>
  L:    linux-i2c@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/i2c/i2c-imx.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-imx.yaml
  F:    drivers/i2c/busses/i2c-imx.c
  
  FREESCALE IMX LPI2C DRIVER
@@@ -7025,7 -6996,7 +7032,7 @@@ M:      Dong Aisheng <aisheng.dong@nxp.com
  L:    linux-i2c@vger.kernel.org
  L:    linux-imx@nxp.com
  S:    Maintained
 -F:    Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
  F:    drivers/i2c/busses/i2c-imx-lpi2c.c
  
  FREESCALE QORIQ DPAA ETHERNET DRIVER
@@@ -7248,7 -7219,7 +7255,7 @@@ FUSE: FILESYSTEM IN USERSPAC
  M:    Miklos Szeredi <miklos@szeredi.hu>
  L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
 -W:    http://fuse.sourceforge.net/
 +W:    https://github.com/libfuse/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
  F:    Documentation/filesystems/fuse.rst
  F:    fs/fuse/
@@@ -7292,7 -7263,7 +7299,7 @@@ F:      drivers/staging/gasket
  GCC PLUGINS
  M:    Kees Cook <keescook@chromium.org>
  R:    Emese Revfy <re.emese@gmail.com>
 -L:    kernel-hardening@lists.openwall.com
 +L:    linux-hardening@vger.kernel.org
  S:    Maintained
  F:    Documentation/kbuild/gcc-plugins.rst
  F:    scripts/Makefile.gcc-plugins
@@@ -7792,8 -7763,8 +7799,8 @@@ F:      Documentation/watchdog/hpwdt.rs
  F:    drivers/watchdog/hpwdt.c
  
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@microsemi.com>
 -L:    esc.storagedev@microsemi.com
 +M:    Don Brace <don.brace@microchip.com>
 +L:    storagedev@microchip.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/hpsa.rst
@@@ -7802,8 -7773,8 +7809,8 @@@ F:      include/linux/cciss*.
  F:    include/uapi/linux/cciss*.h
  
  HFI1 DRIVER
 -M:    Mike Marciniszyn <mike.marciniszyn@intel.com>
 -M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
 +M:    Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 +M:    Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
  L:    linux-rdma@vger.kernel.org
  S:    Supported
  F:    drivers/infiniband/hw/hfi1
@@@ -7936,13 -7907,6 +7943,13 @@@ W:    http://www.hisilicon.co
  F:    Documentation/devicetree/bindings/net/hisilicon*.txt
  F:    drivers/net/ethernet/hisilicon/
  
 +HIKEY960 ONBOARD USB GPIO HUB DRIVER
 +M:    John Stultz <john.stultz@linaro.org>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    drivers/misc/hisi_hikey_usb.c
 +F:    Documentation/devicetree/bindings/misc/hisilicon-hikey-usb.yaml
 +
  HISILICON PMU DRIVER
  M:    Shaokun Zhang <zhangshaokun@hisilicon.com>
  S:    Supported
@@@ -7986,12 -7950,6 +7993,12 @@@ F:    drivers/crypto/hisilicon/sec2/sec_cr
  F:    drivers/crypto/hisilicon/sec2/sec_crypto.h
  F:    drivers/crypto/hisilicon/sec2/sec_main.c
  
 +HISILICON STAGING DRIVERS FOR HIKEY 960/970
 +M:    Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
 +L:    devel@driverdev.osuosl.org
 +S:    Maintained
 +F:    drivers/staging/hikey9xx/
 +
  HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
  M:    Zaibo Xu <xuzaibo@huawei.com>
  S:    Maintained
@@@ -8393,9 -8351,8 +8400,9 @@@ S:      Supporte
  F:    drivers/pci/hotplug/rpaphp*
  
  IBM Power SRIOV Virtual NIC Device Driver
 -M:    Thomas Falcon <tlfalcon@linux.ibm.com>
 -M:    John Allen <jallen@linux.ibm.com>
 +M:    Dany Madden <drt@linux.ibm.com>
 +M:    Lijun Pan <ljp@linux.ibm.com>
 +M:    Sukadev Bhattiprolu <sukadev@linux.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/ibm/ibmvnic.*
@@@ -8409,7 -8366,7 +8416,7 @@@ F:      arch/powerpc/platforms/powernv/copy-
  F:    arch/powerpc/platforms/powernv/vas*
  
  IBM Power Virtual Ethernet Device Driver
 -M:    Thomas Falcon <tlfalcon@linux.ibm.com>
 +M:    Cristobal Forno <cforno12@linux.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/ibm/ibmveth.*
@@@ -8569,6 -8526,7 +8576,6 @@@ F:      drivers/iio/multiplexer/iio-mux.
  
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <jic23@kernel.org>
 -R:    Hartmut Knaack <knaack.h@gmx.de>
  R:    Lars-Peter Clausen <lars@metafoo.de>
  R:    Peter Meerwald-Stadler <pmeerw@pmeerw.net>
  L:    linux-iio@vger.kernel.org
@@@ -8684,9 -8642,8 +8691,9 @@@ INGENIC JZ47xx SoC
  M:    Paul Cercueil <paul@crapouillou.net>
  S:    Maintained
  F:    arch/mips/boot/dts/ingenic/
 -F:    arch/mips/include/asm/mach-jz4740/
 -F:    arch/mips/jz4740/
 +F:    arch/mips/generic/board-ingenic.c
 +F:    arch/mips/include/asm/mach-ingenic/
 +F:    arch/mips/ingenic/Kconfig
  F:    drivers/clk/ingenic/
  F:    drivers/dma/dma-jz4780.c
  F:    drivers/gpu/drm/ingenic/
@@@ -8743,7 -8700,7 +8750,7 @@@ F:      drivers/input/input-mt.
  K:    \b(ABS|SYN)_MT_
  
  INSIDE SECURE CRYPTO DRIVER
 -M:    Antoine Tenart <antoine.tenart@bootlin.com>
 +M:    Antoine Tenart <atenart@kernel.org>
  L:    linux-crypto@vger.kernel.org
  S:    Maintained
  F:    drivers/crypto/inside-secure/
@@@ -8822,8 -8779,7 +8829,8 @@@ F:      include/drm/i915
  F:    include/uapi/drm/i915_drm.h
  
  INTEL ETHERNET DRIVERS
 -M:    Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 +M:    Jesse Brandeburg <jesse.brandeburg@intel.com>
 +M:    Tony Nguyen <anthony.l.nguyen@intel.com>
  L:    intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
  S:    Supported
  W:    http://www.intel.com/support/feedback.htm
@@@ -8922,7 -8878,7 +8929,7 @@@ INTEL IPU3 CSI-2 CIO2 DRIVE
  M:    Yong Zhi <yong.zhi@intel.com>
  M:    Sakari Ailus <sakari.ailus@linux.intel.com>
  M:    Bingbu Cao <bingbu.cao@intel.com>
 -R:    Tian Shu Qiu <tian.shu.qiu@intel.com>
 +R:    Tianshu Qiu <tian.shu.qiu@intel.com>
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
@@@ -8931,7 -8887,7 +8938,7 @@@ F:      drivers/media/pci/intel/ipu3
  INTEL IPU3 CSI-2 IMGU DRIVER
  M:    Sakari Ailus <sakari.ailus@linux.intel.com>
  R:    Bingbu Cao <bingbu.cao@intel.com>
 -R:    Tian Shu Qiu <tian.shu.qiu@intel.com>
 +R:    Tianshu Qiu <tian.shu.qiu@intel.com>
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/admin-guide/media/ipu3.rst
@@@ -8972,6 -8928,22 +8979,6 @@@ S:     Supporte
  W:    https://01.org/linux-acpi
  F:    drivers/platform/x86/intel_menlow.c
  
 -INTEL MIC DRIVERS (mic)
 -M:    Sudeep Dutt <sudeep.dutt@intel.com>
 -M:    Ashutosh Dixit <ashutosh.dixit@intel.com>
 -S:    Supported
 -W:    https://github.com/sudeepdutt/mic
 -W:    http://software.intel.com/en-us/mic-developer
 -F:    Documentation/misc-devices/mic/
 -F:    drivers/dma/mic_x100_dma.c
 -F:    drivers/dma/mic_x100_dma.h
 -F:    drivers/misc/mic/
 -F:    include/linux/mic_bus.h
 -F:    include/linux/scif.h
 -F:    include/uapi/linux/mic_common.h
 -F:    include/uapi/linux/mic_ioctl.h
 -F:    include/uapi/linux/scif_ioctl.h
 -
  INTEL P-Unit IPC DRIVER
  M:    Zha Qipeng <qipeng.zha@intel.com>
  L:    platform-driver-x86@vger.kernel.org
@@@ -8980,8 -8952,8 +8987,8 @@@ F:      arch/x86/include/asm/intel_punit_ipc
  F:    drivers/platform/x86/intel_punit_ipc.c
  
  INTEL PMC CORE DRIVER
 -M:    Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 -M:    Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 +M:    Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
 +M:    David E Box <david.e.box@intel.com>
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    drivers/platform/x86/intel_pmc_core*
@@@ -8994,7 -8966,7 +9001,7 @@@ F:      drivers/gpio/gpio-*cove.
  F:    drivers/gpio/gpio-msic.c
  
  INTEL PMIC MULTIFUNCTION DEVICE DRIVERS
 -R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 +M:    Andy Shevchenko <andy@kernel.org>
  S:    Maintained
  F:    drivers/mfd/intel_msic.c
  F:    drivers/mfd/intel_soc_pmic*
@@@ -9175,7 -9147,6 +9182,7 @@@ L:      iommu@lists.linux-foundation.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
  F:    Documentation/devicetree/bindings/iommu/
 +F:    Documentation/userspace-api/iommu.rst
  F:    drivers/iommu/
  F:    include/linux/iommu.h
  F:    include/linux/iova.h
@@@ -9302,7 -9273,7 +9309,7 @@@ F:      drivers/firmware/iscsi_ibft
  
  ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
  M:    Sagi Grimberg <sagi@grimberg.me>
 -M:    Max Gurtovoy <maxg@nvidia.com>
 +M:    Max Gurtovoy <mgurtovoy@nvidia.com>
  L:    linux-rdma@vger.kernel.org
  S:    Supported
  W:    http://www.openfabrics.org
@@@ -9558,7 -9529,6 +9565,7 @@@ F:      include/linux/sunrpc
  F:    include/uapi/linux/nfsd/
  F:    include/uapi/linux/sunrpc/
  F:    net/sunrpc/
 +F:    Documentation/filesystems/nfs/
  
  KERNEL SELFTEST FRAMEWORK
  M:    Shuah Khan <shuah@kernel.org>
@@@ -9710,7 -9680,7 +9717,7 @@@ F:      security/keys/encrypted-keys
  
  KEYS-TRUSTED
  M:    James Bottomley <jejb@linux.ibm.com>
 -M:    Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 +M:    Jarkko Sakkinen <jarkko@kernel.org>
  M:    Mimi Zohar <zohar@linux.ibm.com>
  L:    linux-integrity@vger.kernel.org
  L:    keyrings@vger.kernel.org
@@@ -9722,7 -9692,7 +9729,7 @@@ F:      security/keys/trusted-keys
  
  KEYS/KEYRINGS
  M:    David Howells <dhowells@redhat.com>
 -M:    Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 +M:    Jarkko Sakkinen <jarkko@kernel.org>
  L:    keyrings@vger.kernel.org
  S:    Maintained
  F:    Documentation/security/keys/core.rst
@@@ -9769,8 -9739,8 +9776,8 @@@ M:      Catalin Marinas <catalin.marinas@arm
  S:    Maintained
  F:    Documentation/dev-tools/kmemleak.rst
  F:    include/linux/kmemleak.h
 -F:    mm/kmemleak-test.c
  F:    mm/kmemleak.c
 +F:    samples/kmemleak/kmemleak-test.c
  
  KMOD KERNEL MODULE LOADER - USERMODE HELPER
  M:    Luis Chamberlain <mcgrof@kernel.org>
@@@ -9799,12 -9769,6 +9806,12 @@@ F:    Documentation/admin-guide/auxdisplay
  F:    drivers/auxdisplay/ks0108.c
  F:    include/linux/ks0108.h
  
 +KTD253 BACKLIGHT DRIVER
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/leds/backlight/kinetic,ktd253.yaml
 +F:    drivers/video/backlight/ktd253-backlight.c
 +
  L3MDEV
  M:    David Ahern <dsahern@kernel.org>
  L:    netdev@vger.kernel.org
@@@ -9859,7 -9823,7 +9866,7 @@@ F:      drivers/scsi/53c700
  LEAKING_ADDRESSES
  M:    Tobin C. Harding <me@tobin.cc>
  M:    Tycho Andersen <tycho@tycho.pizza>
 -L:    kernel-hardening@lists.openwall.com
 +L:    linux-hardening@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git
  F:    scripts/leaking_addresses.pl
@@@ -9930,6 -9894,15 +9937,6 @@@ T:     git git://git.kernel.org/pub/scm/lin
  F:    drivers/ata/pata_arasan_cf.c
  F:    include/linux/pata_arasan_cf_data.h
  
 -LIBATA PATA DRIVERS
 -M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 -M:    Jens Axboe <axboe@kernel.dk>
 -L:    linux-ide@vger.kernel.org
 -S:    Maintained
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 -F:    drivers/ata/ata_generic.c
 -F:    drivers/ata/pata_*.c
 -
  LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
  M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-ide@vger.kernel.org
@@@ -10355,13 -10328,6 +10362,13 @@@ S: Maintaine
  W:    http://linux-test-project.github.io/
  T:    git git://github.com/linux-test-project/ltp.git
  
 +LYNX PCS MODULE
 +M:    Ioana Ciornei <ioana.ciornei@nxp.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/pcs/pcs-lynx.c
 +F:    include/linux/pcs-lynx.h
 +
  M68K ARCHITECTURE
  M:    Geert Uytterhoeven <geert@linux-m68k.org>
  L:    linux-m68k@lists.linux-m68k.org
@@@ -10569,7 -10535,7 +10576,7 @@@ M:   Tobias Waldekranz <tobias@waldekranz
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/marvell,mvusb.yaml
 -F:    drivers/net/phy/mdio-mvusb.c
 +F:    drivers/net/mdio/mdio-mvusb.c
  
  MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER
  M:    Hu Ziji <huziji@marvell.com>
@@@ -10716,15 -10682,6 +10723,15 @@@ L: linux-input@vger.kernel.or
  S:    Maintained
  F:    drivers/hid/hid-mcp2221.c
  
 +MCP251XFD SPI-CAN NETWORK DRIVER
 +M:    Marc Kleine-Budde <mkl@pengutronix.de>
 +M:    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 +R:    Thomas Kopp <thomas.kopp@microchip.com>
 +L:    linux-can@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
 +F:    drivers/net/can/spi/mcp251xfd/
 +
  MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
  M:    Peter Rosin <peda@axentia.se>
  L:    linux-iio@vger.kernel.org
@@@ -11107,7 -11064,6 +11114,7 @@@ F:   drivers/char/hw_random/mtk-rng.
  
  MEDIATEK SWITCH DRIVER
  M:    Sean Wang <sean.wang@mediatek.com>
 +M:    Landen Chao <Landen.Chao@mediatek.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/dsa/mt7530.*
@@@ -11115,7 -11071,7 +11122,7 @@@ F:   net/dsa/tag_mtk.
  
  MEDIATEK USB3 DRD IP DRIVER
  M:    Chunfeng Yun <chunfeng.yun@mediatek.com>
 -L:    linux-usb@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-usb@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -11162,12 -11118,6 +11169,12 @@@ W: http://www.melfas.co
  F:    Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
  F:    drivers/input/touchscreen/melfas_mip4.c
  
 +MELLANOX BLUEFIELD I2C DRIVER
 +M:    Khalil Blaiech <kblaiech@mellanox.com>
 +L:    linux-i2c@vger.kernel.org
 +S:    Supported
 +F:    drivers/i2c/busses/i2c-mlxbf.c
 +
  MELLANOX ETHERNET DRIVER (mlx4_en)
  M:    Tariq Toukan <tariqt@nvidia.com>
  L:    netdev@vger.kernel.org
@@@ -11389,8 -11339,8 +11396,8 @@@ S:   Supporte
  W:    http://linux-meson.com/
  T:    git git://linuxtv.org/media_tree.git
  F:    Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
 -F:    drivers/media/platform/meson/ao-cec-g12a.c
 -F:    drivers/media/platform/meson/ao-cec.c
 +F:    drivers/media/cec/platform/meson/ao-cec-g12a.c
 +F:    drivers/media/cec/platform/meson/ao-cec.c
  
  MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
  M:    Liang Yang <liang.yang@amlogic.com>
@@@ -11400,6 -11350,7 +11407,6 @@@ F:   Documentation/devicetree/bindings/mt
  F:    drivers/mtd/nand/raw/meson_*
  
  MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
 -M:    Maxime Jourdan <mjourdan@baylibre.com>
  M:    Neil Armstrong <narmstrong@baylibre.com>
  L:    linux-media@vger.kernel.org
  L:    linux-amlogic@lists.infradead.org
@@@ -11419,7 -11370,6 +11426,7 @@@ M:   Hemant Kumar <hemantk@codeaurora.org
  L:    linux-arm-msm@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
 +F:    Documentation/ABI/stable/sysfs-bus-mhi
  F:    Documentation/mhi/
  F:    drivers/bus/mhi/
  F:    include/linux/mhi.h
@@@ -11613,14 -11563,13 +11620,14 @@@ M:        Microchip Linux Driver Support <UNGL
  L:    linux-mips@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/mips/mscc.txt
 +F:    Documentation/devicetree/bindings/power/reset/ocelot-reset.txt
  F:    arch/mips/boot/dts/mscc/
  F:    arch/mips/configs/generic/board-ocelot.config
  F:    arch/mips/generic/board-ocelot.c
  
  MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
 -M:    Don Brace <don.brace@microsemi.com>
 -L:    esc.storagedev@microsemi.com
 +M:    Don Brace <don.brace@microchip.com>
 +L:    storagedev@microchip.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/smartpqi.rst
@@@ -11678,7 -11627,7 +11685,7 @@@ MIPS GENERIC PLATFOR
  M:    Paul Burton <paulburton@kernel.org>
  L:    linux-mips@vger.kernel.org
  S:    Supported
 -F:    Documentation/devicetree/bindings/power/mti,mips-cpc.txt
 +F:    Documentation/devicetree/bindings/power/mti,mips-cpc.yaml
  F:    arch/mips/generic/
  F:    arch/mips/tools/generic-board-config.sh
  
@@@ -11734,7 -11683,7 +11741,7 @@@ S:   Odd Fixe
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lkundrak/linux-mmp.git
  F:    arch/arm/boot/dts/mmp*
  F:    arch/arm/mach-mmp/
 -F:    linux/soc/mmp/
 +F:    include/linux/soc/mmp/
  
  MMP USB PHY DRIVERS
  R:    Lubomir Rintel <lkundrak@v3.sk>
@@@ -11852,13 -11801,6 +11859,13 @@@ Q: http://patchwork.linuxtv.org/project
  T:    git git://linuxtv.org/anttip/media_tree.git
  F:    drivers/media/usb/msi2500/
  
 +MSTAR INTERRUPT CONTROLLER DRIVER
 +M:    Mark-PK Tsai <mark-pk.tsai@mediatek.com>
 +M:    Daniel Palmer <daniel@thingy.jp>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/interrupt-controller/mstar,mst-intc.yaml
 +F:    drivers/irqchip/irq-mst-intc.c
 +
  MSYSTEMS DISKONCHIP G3 MTD DRIVER
  M:    Robert Jarzmik <robert.jarzmik@free.fr>
  L:    linux-mtd@lists.infradead.org
@@@ -12121,6 -12063,7 +12128,6 @@@ M:   Neil Horman <nhorman@tuxdriver.com
  L:    netdev@vger.kernel.org
  S:    Maintained
  W:    https://fedorahosted.org/dropwatch/
 -F:    include/net/drop_monitor.h
  F:    include/uapi/linux/net_dropmon.h
  F:    net/core/drop_monitor.c
  
@@@ -12134,7 -12077,6 +12141,7 @@@ Q:   http://patchwork.ozlabs.org/project/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
  F:    Documentation/devicetree/bindings/net/
 +F:    drivers/connector/
  F:    drivers/net/
  F:    include/linux/etherdevice.h
  F:    include/linux/fcdevice.h
@@@ -12160,7 -12102,6 +12167,7 @@@ NETWORKING [DSA
  M:    Andrew Lunn <andrew@lunn.ch>
  M:    Vivien Didelot <vivien.didelot@gmail.com>
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +M:    Vladimir Oltean <olteanv@gmail.com>
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/dsa/
  F:    drivers/net/dsa/
@@@ -12215,7 -12156,6 +12222,7 @@@ F:   net/ipv6/ipcomp6.
  F:    net/ipv6/xfrm*
  F:    net/key/
  F:    net/xfrm/
 +F:    tools/testing/selftests/net/ipsec.c
  
  NETWORKING [IPv4/IPv6]
  M:    "David S. Miller" <davem@davemloft.net>
@@@ -12340,7 -12280,6 +12347,7 @@@ F:   include/linux/sunrpc
  F:    include/uapi/linux/nfs*
  F:    include/uapi/linux/sunrpc/
  F:    net/sunrpc/
 +F:    Documentation/filesystems/nfs/
  
  NILFS2 FILESYSTEM
  M:    Ryusuke Konishi <konishi.ryusuke@gmail.com>
@@@ -12376,19 -12315,6 +12383,19 @@@ S: Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
  F:    arch/nios2/
  
 +NITRO ENCLAVES (NE)
 +M:    Andra Paraschiv <andraprs@amazon.com>
 +M:    Alexandru Vasile <lexnv@amazon.com>
 +M:    Alexandru Ciobotaru <alcioa@amazon.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Supported
 +W:    https://aws.amazon.com/ec2/nitro/nitro-enclaves/
 +F:    Documentation/virt/ne_overview.rst
 +F:    drivers/virt/nitro_enclaves/
 +F:    include/linux/nitro_enclaves.h
 +F:    include/uapi/linux/nitro_enclaves.h
 +F:    samples/nitro_enclaves/
 +
  NOHZ, DYNTICKS SUPPORT
  M:    Frederic Weisbecker <fweisbec@gmail.com>
  M:    Thomas Gleixner <tglx@linutronix.de>
@@@ -12559,18 -12485,11 +12566,18 @@@ S:        Maintaine
  F:    Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
  F:    drivers/gpu/drm/imx/dcss/
  
 +NXP PTN5150A CC LOGIC AND EXTCON DRIVER
 +M:    Krzysztof Kozlowski <krzk@kernel.org>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
 +F:    drivers/extcon/extcon-ptn5150.c
 +
  NXP SGTL5000 DRIVER
  M:    Fabio Estevam <festevam@gmail.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/devicetree/bindings/sound/sgtl5000.txt
 +F:    Documentation/devicetree/bindings/sound/sgtl5000.yaml
  F:    sound/soc/codecs/sgtl5000*
  
  NXP SJA1105 ETHERNET SWITCH DRIVER
@@@ -12616,7 -12535,6 +12623,7 @@@ M:   Josh Poimboeuf <jpoimboe@redhat.com
  M:    Peter Zijlstra <peterz@infradead.org>
  S:    Supported
  F:    tools/objtool/
 +F:    include/linux/objtool.h
  
  OCELOT ETHERNET SWITCH DRIVER
  M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@@ -12629,7 -12547,6 +12636,7 @@@ F:   drivers/net/dsa/ocelot/
  F:    drivers/net/ethernet/mscc/
  F:    include/soc/mscc/ocelot*
  F:    net/dsa/tag_ocelot.c
 +F:    tools/testing/selftests/drivers/net/ocelot/*
  
  OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
  M:    Frederic Barrat <fbarrat@linux.ibm.com>
@@@ -12879,7 -12796,7 +12886,7 @@@ T:   git git://linuxtv.org/media_tree.gi
  F:    drivers/media/i2c/ov2685.c
  
  OMNIVISION OV2740 SENSOR DRIVER
 -M:    Tianshu Qiu <tian.shu.qiua@intel.com>
 +M:    Tianshu Qiu <tian.shu.qiu@intel.com>
  R:    Shawn Tu <shawnx.tu@intel.com>
  R:    Bingbu Cao <bingbu.cao@intel.com>
  L:    linux-media@vger.kernel.org
@@@ -12895,12 -12812,10 +12902,12 @@@ T:        git git://linuxtv.org/media_tree.gi
  F:    drivers/media/i2c/ov5640.c
  
  OMNIVISION OV5647 SENSOR DRIVER
 -M:    Luis Oliveira <lolivei@synopsys.com>
 +M:    Dave Stevenson <dave.stevenson@raspberrypi.com>
 +M:    Jacopo Mondi <jacopo@jmondi.org>
  L:    linux-media@vger.kernel.org
  S:    Maintained
  T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/ov5647.yaml
  F:    drivers/media/i2c/ov5647.c
  
  OMNIVISION OV5670 SENSOR DRIVER
@@@ -13001,8 -12916,8 +13008,8 @@@ S:   Maintaine
  F:    drivers/char/hw_random/optee-rng.c
  
  OPA-VNIC DRIVER
 -M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
 -M:    Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
 +M:    Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 +M:    Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
  L:    linux-rdma@vger.kernel.org
  S:    Supported
  F:    drivers/infiniband/ulp/opa_vnic
@@@ -13163,9 -13078,7 +13170,9 @@@ F:   lib/packing.
  
  PADATA PARALLEL EXECUTION MECHANISM
  M:    Steffen Klassert <steffen.klassert@secunet.com>
 +M:    Daniel Jordan <daniel.m.jordan@oracle.com>
  L:    linux-crypto@vger.kernel.org
 +L:    linux-kernel@vger.kernel.org
  S:    Maintained
  F:    Documentation/core-api/padata.rst
  F:    include/linux/padata.h
@@@ -13302,7 -13215,6 +13309,7 @@@ F:   drivers/firmware/pcdp.
  
  PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
  M:    Thomas Petazzoni <thomas.petazzoni@bootlin.com>
 +M:    Pali Rohár <pali@kernel.org>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -13439,7 -13351,7 +13446,7 @@@ PCI DRIVER FOR SAMSUNG EXYNO
  M:    Jingoo Han <jingoohan1@gmail.com>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
  F:    drivers/pci/controller/dwc/pci-exynos.c
  
@@@ -13837,16 -13749,17 +13844,16 @@@ PIN CONTROLLER - RENESA
  M:    Geert Uytterhoeven <geert+renesas@glider.be>
  L:    linux-renesas-soc@vger.kernel.org
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git renesas-pinctrl
  F:    Documentation/devicetree/bindings/pinctrl/renesas,*
 -F:    drivers/pinctrl/pinctrl-rz*
 -F:    drivers/pinctrl/sh-pfc/
 +F:    drivers/pinctrl/renesas/
  
  PIN CONTROLLER - SAMSUNG
  M:    Tomasz Figa <tomasz.figa@gmail.com>
  M:    Krzysztof Kozlowski <krzk@kernel.org>
  M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
  Q:    https://patchwork.kernel.org/project/linux-samsung-soc/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
@@@ -14072,7 -13985,6 +14079,7 @@@ PRINT
  M:    Petr Mladek <pmladek@suse.com>
  M:    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
  R:    Steven Rostedt <rostedt@goodmis.org>
 +R:    John Ogness <john.ogness@linutronix.de>
  S:    Maintained
  F:    include/linux/printk.h
  F:    kernel/printk/
@@@ -14303,24 -14215,20 +14310,24 @@@ F:        drivers/firmware/qemu_fw_cfg.
  F:    include/uapi/linux/qemu_fw_cfg.h
  
  QIB DRIVER
 -M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
 -M:    Mike Marciniszyn <mike.marciniszyn@intel.com>
 +M:    Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 +M:    Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
  L:    linux-rdma@vger.kernel.org
  S:    Supported
  F:    drivers/infiniband/hw/qib/
  
  QLOGIC QL41xxx FCOE DRIVER
 -M:    QLogic-Storage-Upstream@cavium.com
 +M:    Saurav Kashyap <skashyap@marvell.com>
 +M:    Javed Hasan <jhasan@marvell.com>
 +M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/qedf/
  
  QLOGIC QL41xxx ISCSI DRIVER
 -M:    QLogic-Storage-Upstream@cavium.com
 +M:    Nilesh Javali <njavali@marvell.com>
 +M:    Manish Rangankar <mrangankar@marvell.com>
 +M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/qedi/
@@@ -14353,20 -14261,21 +14360,20 @@@ M:        Nilesh Javali <njavali@marvell.com
  M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
 -F:    Documentation/scsi/LICENSE.qla2xxx
  F:    drivers/scsi/qla2xxx/
  
  QLOGIC QLA3XXX NETWORK DRIVER
  M:    GR-Linux-NIC-Dev@marvell.com
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    Documentation/networking/device_drivers/ethernet/qlogic/LICENSE.qla3xxx
  F:    drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLA4XXX iSCSI DRIVER
 -M:    QLogic-Storage-Upstream@qlogic.com
 +M:    Nilesh Javali <njavali@marvell.com>
 +M:    Manish Rangankar <mrangankar@marvell.com>
 +M:    GR-QLogic-Storage-Upstream@marvell.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
 -F:    Documentation/scsi/LICENSE.qla4xxx
  F:    drivers/scsi/qla4xxx/
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
@@@ -14459,7 -14368,7 +14466,7 @@@ L:   linux-pm@vger.kernel.or
  L:    linux-arm-msm@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/power/avs/qcom,cpr.txt
 -F:    drivers/power/avs/qcom-cpr.c
 +F:    drivers/soc/qcom/cpr.c
  
  QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
  M:    Ilia Lin <ilia.lin@kernel.org>
@@@ -14714,9 -14623,9 +14721,9 @@@ M:   Niklas Söderlund <niklas.soderlund+
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
 -F:    drivers/media/i2c/rdacm20.c
  F:    drivers/media/i2c/max9271.c
  F:    drivers/media/i2c/max9271.h
 +F:    drivers/media/i2c/rdacm20.c
  
  RDC R-321X SoC
  M:    Florian Fainelli <florian@openwrt.org>
@@@ -14729,8 -14638,8 +14736,8 @@@ S:   Maintaine
  F:    drivers/net/ethernet/rdc/r6040.c
  
  RDMAVT - RDMA verbs software
 -M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
 -M:    Mike Marciniszyn <mike.marciniszyn@intel.com>
 +M:    Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 +M:    Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
  L:    linux-rdma@vger.kernel.org
  S:    Supported
  F:    drivers/infiniband/sw/rdmavt
@@@ -15010,11 -14919,8 +15017,11 @@@ F: include/linux/hid-roccat
  
  ROCKCHIP ISP V1 DRIVER
  M:    Helen Koike <helen.koike@collabora.com>
 +M:    Dafna Hirschfeld <dafna.hirschfeld@collabora.com>
  L:    linux-media@vger.kernel.org
  S:    Maintained
 +F:    Documentation/admin-guide/media/rkisp1.rst
 +F:    Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst
  F:    drivers/staging/media/rkisp1/
  
  ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
@@@ -15289,14 -15195,6 +15296,14 @@@ F: Documentation/s390/vfio-ccw.rs
  F:    drivers/s390/cio/vfio_ccw*
  F:    include/uapi/linux/vfio_ccw.h
  
 +S390 VFIO-PCI DRIVER
 +M:    Matthew Rosato <mjrosato@linux.ibm.com>
 +L:    linux-s390@vger.kernel.org
 +L:    kvm@vger.kernel.org
 +S:    Supported
 +F:    drivers/vfio/pci/vfio_pci_zdev.c
 +F:    include/uapi/linux/vfio_zdev.h
 +
  S390 ZCRYPT DRIVER
  M:    Harald Freudenberger <freude@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
@@@ -15352,6 -15250,7 +15359,6 @@@ F:   security/safesetid
  
  SAMSUNG AUDIO (ASoC) DRIVERS
  M:    Krzysztof Kozlowski <krzk@kernel.org>
 -M:    Sangbeom Kim <sbkim73@samsung.com>
  M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Supported
@@@ -15386,6 -15285,7 +15393,6 @@@ S:   Maintaine
  F:    drivers/platform/x86/samsung-laptop.c
  
  SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 -M:    Sangbeom Kim <sbkim73@samsung.com>
  M:    Krzysztof Kozlowski <krzk@kernel.org>
  M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-kernel@vger.kernel.org
@@@ -15405,17 -15305,16 +15412,17 @@@ F:        include/linux/mfd/samsung
  SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER
  M:    Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
  L:    linux-media@vger.kernel.org
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
  F:    drivers/media/platform/s3c-camif/
  F:    include/media/drv-intf/s3c_camif.h
  
  SAMSUNG S3FWRN5 NFC DRIVER
 -M:    Robert Baldyga <r.baldyga@samsung.com>
 +M:    Krzysztof Kozlowski <krzk@kernel.org>
  M:    Krzysztof Opasiak <k.opasiak@samsung.com>
  L:    linux-nfc@lists.01.org (moderated for non-subscribers)
 -S:    Supported
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
  F:    drivers/nfc/s3fwrn5
  
  SAMSUNG S5C73M3 CAMERA DRIVER
@@@ -15455,7 -15354,7 +15462,7 @@@ SAMSUNG SOC CLOCK DRIVER
  M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  M:    Tomasz Figa <tomasz.figa@gmail.com>
  M:    Chanwoo Choi <cw00.choi@samsung.com>
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
  F:    Documentation/devicetree/bindings/clock/exynos*.txt
@@@ -15463,19 -15362,17 +15470,19 @@@ F:        Documentation/devicetree/bindings/cl
  F:    Documentation/devicetree/bindings/clock/samsung,s5p*
  F:    drivers/clk/samsung/
  F:    include/dt-bindings/clock/exynos*.h
 +F:    include/linux/clk/samsung.h
 +F:    include/linux/platform_data/clk-s3c2410.h
  
  SAMSUNG SPI DRIVERS
 -M:    Kukjin Kim <kgene@kernel.org>
  M:    Krzysztof Kozlowski <krzk@kernel.org>
  M:    Andi Shyti <andi@etezian.org>
  L:    linux-spi@vger.kernel.org
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/spi/spi-samsung.txt
  F:    drivers/spi/spi-s3c*
  F:    include/linux/platform_data/spi-s3c64xx.h
 +F:    include/linux/spi/s3c24xx-fiq.h
  
  SAMSUNG SXGBE DRIVERS
  M:    Byungho An <bh74.an@samsung.com>
@@@ -15519,7 -15416,6 +15526,7 @@@ R:   Dietmar Eggemann <dietmar.eggemann@a
  R:    Steven Rostedt <rostedt@goodmis.org> (SCHED_FIFO/SCHED_RR)
  R:    Ben Segall <bsegall@google.com> (CONFIG_CFS_BANDWIDTH)
  R:    Mel Gorman <mgorman@suse.de> (CONFIG_NUMA_BALANCING)
 +R:    Daniel Bristot de Oliveira <bristot@redhat.com> (SCHED_DEADLINE)
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
@@@ -15641,8 -15537,8 +15648,8 @@@ F:   drivers/mmc/host/sdricoh_cs.
  SECO BOARDS CEC DRIVER
  M:    Ettore Chimenti <ek5.chimenti@gmail.com>
  S:    Maintained
 -F:    drivers/media/platform/seco-cec/seco-cec.c
 -F:    drivers/media/platform/seco-cec/seco-cec.h
 +F:    drivers/media/cec/platform/seco/seco-cec.c
 +F:    drivers/media/cec/platform/seco/seco-cec.h
  
  SECURE COMPUTING
  M:    Kees Cook <keescook@chromium.org>
@@@ -15735,7 -15631,6 +15742,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    Documentation/ABI/obsolete/sysfs-selinux-checkreqprot
  F:    Documentation/ABI/obsolete/sysfs-selinux-disable
  F:    Documentation/admin-guide/LSM/SELinux.rst
 +F:    include/trace/events/avc.h
  F:    include/uapi/linux/selinux_netlink.h
  F:    scripts/selinux/
  F:    security/selinux/
@@@ -15804,7 -15699,6 +15811,7 @@@ L:   netdev@vger.kernel.or
  S:    Maintained
  F:    drivers/net/phy/phylink.c
  F:    drivers/net/phy/sfp*
 +F:    include/linux/mdio/mdio-i2c.h
  F:    include/linux/phylink.h
  F:    include/linux/sfp.h
  K:    phylink\.h|struct\s+phylink|\.phylink|>phylink_|phylink_(autoneg|clear|connect|create|destroy|disconnect|ethtool|helper|mac|mii|of|set|start|stop|test|validate)
@@@ -15993,17 -15887,19 +16000,17 @@@ F:        drivers/video/fbdev/simplefb.
  F:    include/linux/platform_data/simplefb.h
  
  SIMTEC EB110ATX (Chalice CATS)
 -M:    Vincent Sanders <vince@simtec.co.uk>
  M:    Simtec Linux Team <linux@simtec.co.uk>
  S:    Supported
  W:    http://www.simtec.co.uk/products/EB110ATX/
  
  SIMTEC EB2410ITX (BAST)
 -M:    Vincent Sanders <vince@simtec.co.uk>
  M:    Simtec Linux Team <linux@simtec.co.uk>
  S:    Supported
  W:    http://www.simtec.co.uk/products/EB2410ITX/
 -F:    arch/arm/mach-s3c24xx/bast-ide.c
 -F:    arch/arm/mach-s3c24xx/bast-irq.c
 -F:    arch/arm/mach-s3c24xx/mach-bast.c
 +F:    arch/arm/mach-s3c/bast-ide.c
 +F:    arch/arm/mach-s3c/bast-irq.c
 +F:    arch/arm/mach-s3c/mach-bast.c
  
  SIOX
  M:    Thorsten Scherer <t.scherer@eckelmann.de>
@@@ -16042,13 -15938,6 +16049,13 @@@ F: Documentation/fb/sisfb.rs
  F:    drivers/video/fbdev/sis/
  F:    include/video/sisfb.h
  
 +SIS I2C TOUCHSCREEN DRIVER
 +M:    Mika Penttilä <mika.penttila@nextfour.com>
 +L:    linux-input@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
 +F:    drivers/input/touchscreen/sis_i2c.c
 +
  SIS USB2VGA DRIVER
  M:    Thomas Winischhofer <thomas@winischhofer.net>
  S:    Maintained
@@@ -16216,6 -16105,7 +16223,6 @@@ F:   include/uapi/rdma/rdma_user_rxe.
  SOFTLOGIC 6x10 MPEG CODEC
  M:    Bluecherry Maintainers <maintainers@bluecherrydvr.com>
  M:    Anton Sviridenko <anton@corp.bluecherry.net>
 -M:    Andrey Utkin <andrey.utkin@corp.bluecherry.net>
  M:    Andrey Utkin <andrey_utkin@fastmail.com>
  M:    Ismael Luceno <ismael@iodev.co.uk>
  L:    linux-media@vger.kernel.org
@@@ -16297,7 -16187,7 +16304,7 @@@ M:   Leon Luo <leonl@leopardimaging.com
  L:    linux-media@vger.kernel.org
  S:    Maintained
  T:    git git://linuxtv.org/media_tree.git
 -F:    Documentation/devicetree/bindings/media/i2c/imx274.txt
 +F:    Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
  F:    drivers/media/i2c/imx274.c
  
  SONY IMX290 SENSOR DRIVER
@@@ -16635,6 -16525,7 +16642,6 @@@ F:   drivers/staging/rtl8712
  
  STAGING - SEPS525 LCD CONTROLLER DRIVERS
  M:    Michael Hennerich <michael.hennerich@analog.com>
 -M:    Beniamin Bia <beniamin.bia@analog.com>
  L:    linux-fbdev@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@@ -16682,7 -16573,7 +16689,7 @@@ STI CEC DRIVE
  M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/stih-cec.txt
 -F:    drivers/media/platform/sti/cec/
 +F:    drivers/media/cec/platform/sti/
  
  STK1160 USB VIDEO CAPTURE DRIVER
  M:    Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
@@@ -16866,13 -16757,6 +16873,13 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
  F:    drivers/gpio/gpio-dwapb.c
  
 +SYNOPSYS DESIGNWARE APB SSI DRIVER
 +M:    Serge Semin <fancer.lancer@gmail.com>
 +L:    linux-spi@vger.kernel.org
 +S:    Supported
 +F:    Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
 +F:    drivers/spi/spi-dw*
 +
  SYNOPSYS DESIGNWARE AXI DMAC DRIVER
  M:    Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
  S:    Maintained
@@@ -16883,7 -16767,7 +16890,7 @@@ SYNOPSYS DESIGNWARE DMAC DRIVE
  M:    Viresh Kumar <vireshk@kernel.org>
  R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/dma/snps-dma.txt
 +F:    Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
  F:    drivers/dma/dw/
  F:    include/dt-bindings/dma/dw-dmac.h
  F:    include/linux/dma/dw.h
@@@ -16899,8 -16783,8 +16906,8 @@@ SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVE
  M:    Jose Abreu <Jose.Abreu@synopsys.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/phy/mdio-xpcs.c
 -F:    include/linux/mdio-xpcs.h
 +F:    drivers/net/pcs/pcs-xpcs.c
 +F:    include/linux/pcs/pcs-xpcs.h
  
  SYNOPSYS DESIGNWARE I2C DRIVER
  M:    Jarkko Nikula <jarkko.nikula@linux.intel.com>
@@@ -17414,7 -17298,7 +17421,7 @@@ S:   Maintaine
  F:    drivers/thermal/ti-soc-thermal/
  
  TI BQ27XXX POWER SUPPLY DRIVER
 -R:    Andrew F. Davis <afd@ti.com>
 +R:    Dan Murphy <dmurphy@ti.com>
  F:    drivers/power/supply/bq27xxx_battery.c
  F:    drivers/power/supply/bq27xxx_battery_i2c.c
  F:    include/linux/power/bq27xxx_battery.h
@@@ -17688,9 -17572,8 +17695,9 @@@ S:   Supporte
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    Documentation/RCU/torture.rst
  F:    kernel/locking/locktorture.c
 -F:    kernel/rcu/rcuperf.c
 +F:    kernel/rcu/rcuscale.c
  F:    kernel/rcu/rcutorture.c
 +F:    kernel/rcu/refscale.c
  F:    kernel/torture.c
  
  TOSHIBA ACPI EXTRAS DRIVER
@@@ -17734,13 -17617,13 +17741,13 @@@ F:        drivers/platform/x86/toshiba-wmi.
  
  TPM DEVICE DRIVER
  M:    Peter Huewe <peterhuewe@gmx.de>
 -M:    Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 +M:    Jarkko Sakkinen <jarkko@kernel.org>
  R:    Jason Gunthorpe <jgg@ziepe.ca>
  L:    linux-integrity@vger.kernel.org
  S:    Maintained
  W:    https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
  Q:    https://patchwork.kernel.org/project/linux-integrity/list/
 -T:    git git://git.infradead.org/users/jjs/linux-tpmdd.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
  F:    drivers/char/tpm/
  
  TRACING
@@@ -17877,7 -17760,6 +17884,7 @@@ S:   Supporte
  W:    http://www.linux-mtd.infradead.org/doc/ubifs.html
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
 +F:    Documentation/filesystems/ubifs-authentication.rst
  F:    Documentation/filesystems/ubifs.rst
  F:    fs/ubifs/
  
@@@ -18271,6 -18153,14 +18278,6 @@@ T:  git git://linuxtv.org/media_tree.gi
  F:    drivers/media/usb/uvc/
  F:    include/uapi/linux/uvcvideo.h
  
 -USB VISION DRIVER
 -M:    Hans Verkuil <hverkuil@xs4all.nl>
 -L:    linux-media@vger.kernel.org
 -S:    Odd Fixes
 -W:    https://linuxtv.org
 -T:    git git://linuxtv.org/media_tree.git
 -F:    drivers/staging/media/usbvision/
 -
  USB WEBCAM GADGET
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    linux-usb@vger.kernel.org
@@@ -18400,12 -18290,6 +18407,12 @@@ F: drivers/vfio
  F:    include/linux/vfio.h
  F:    include/uapi/linux/vfio.h
  
 +VFIO FSL-MC DRIVER
 +M:    Diana Craciun <diana.craciun@oss.nxp.com>
 +L:    kvm@vger.kernel.org
 +S:    Maintained
 +F:    drivers/vfio/fsl-mc/
 +
  VFIO MEDIATED DEVICE DRIVERS
  M:    Kirti Wankhede <kwankhede@nvidia.com>
  L:    kvm@vger.kernel.org
@@@ -18430,8 -18314,7 +18437,8 @@@ F:   drivers/gpu/vga/vga_switcheroo.
  F:    include/linux/vga_switcheroo.h
  
  VIA RHINE NETWORK DRIVER
 -S:    Orphan
 +S:    Maintained
 +M:    Kevin Brace <kevinbrace@bracecomputerlab.com>
  F:    drivers/net/ethernet/via/via-rhine.c
  
  VIA SD/MMC CARD CONTROLLER DRIVER
@@@ -18476,8 -18359,10 +18483,8 @@@ S:  Maintaine
  F:    drivers/media/platform/video-mux.c
  
  VIDEOBUF2 FRAMEWORK
 -M:    Pawel Osciak <pawel@osciak.com>
 +M:    Tomasz Figa <tfiga@chromium.org>
  M:    Marek Szyprowski <m.szyprowski@samsung.com>
 -M:    Kyungmin Park <kyungmin.park@samsung.com>
 -R:    Tomasz Figa <tfiga@chromium.org>
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    drivers/media/common/videobuf2/*
@@@ -18635,7 -18520,6 +18642,7 @@@ VIRTIO MEM DRIVE
  M:    David Hildenbrand <david@redhat.com>
  L:    virtualization@lists.linux-foundation.org
  S:    Maintained
 +W:    https://virtio-mem.gitlab.io/
  F:    drivers/virtio/virtio_mem.c
  F:    include/uapi/linux/virtio_mem.h
  
@@@ -18668,14 -18552,6 +18675,14 @@@ W: https://linuxtv.or
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/test-drivers/vivid/*
  
 +VIDTV VIRTUAL DIGITAL TV DRIVER
 +M:    Daniel W. S. Almeida <dwlsalmeida@gmail.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +F:    drivers/media/test-drivers/vidtv/*
 +
  VLYNQ BUS
  M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    openwrt-devel@lists.openwrt.org (subscribers-only)
@@@ -18942,7 -18818,7 +18949,7 @@@ F:   Documentation/devicetree/bindings/mf
  F:    Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
  F:    Documentation/devicetree/bindings/sound/wlf,arizona.yaml
  F:    Documentation/hwmon/wm83??.rst
 -F:    arch/arm/mach-s3c64xx/mach-crag6410*
 +F:    arch/arm/mach-s3c/mach-crag6410*
  F:    drivers/clk/clk-wm83*.c
  F:    drivers/extcon/extcon-arizona.c
  F:    drivers/gpio/gpio-*wm*.c
@@@ -19043,11 -18919,11 +19050,11 @@@ T:        git git://git.kernel.org/pub/scm/lin
  F:    arch/x86/mm/
  
  X86 PLATFORM DRIVERS
 -M:    Darren Hart <dvhart@infradead.org>
 -M:    Andy Shevchenko <andy@infradead.org>
 +M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Mark Gross <mgross@linux.intel.com>
  L:    platform-driver-x86@vger.kernel.org
 -S:    Odd Fixes
 -T:    git git://git.infradead.org/linux-platform-drivers-x86.git
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
  F:    drivers/platform/olpc/
  F:    drivers/platform/x86/
  
@@@ -19396,16 -19272,6 +19403,16 @@@ T: git git://git.kernel.org/pub/scm/lin
  F:    Documentation/filesystems/zonefs.rst
  F:    fs/zonefs/
  
 +ZR36067 VIDEO FOR LINUX DRIVER
 +M:    Corentin Labbe <clabbe@baylibre.com>
 +L:    mjpeg-users@lists.sourceforge.net
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +W:    http://mjpeg.sourceforge.net/driver-zoran/
 +Q:    https://patchwork.linuxtv.org/project/linux-media/list/
 +F:    Documentation/driver-api/media/drivers/zoran.rst
 +F:    drivers/staging/media/zoran/
 +
  ZPOOL COMPRESSED PAGE STORAGE API
  M:    Dan Streetman <ddstreet@ieee.org>
  L:    linux-mm@kvack.org
@@@ -59,8 -59,6 +59,8 @@@ static void dma_buf_release(struct dent
        struct dma_buf *dmabuf;
  
        dmabuf = dentry->d_fsdata;
 +      if (unlikely(!dmabuf))
 +              return;
  
        BUG_ON(dmabuf->vmapping_counter);
  
@@@ -851,6 -849,9 +851,9 @@@ EXPORT_SYMBOL_GPL(dma_buf_unpin)
   * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
   * on error. May return -EINTR if it is interrupted by a signal.
   *
+  * On success, the DMA addresses and lengths in the returned scatterlist are
+  * PAGE_SIZE aligned.
+  *
   * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
   * the underlying backing storage is pinned for as long as a mapping exists,
   * therefore users/importers should not hold onto a mapping for undue amounts of
@@@ -904,6 -905,24 +907,24 @@@ struct sg_table *dma_buf_map_attachment
                attach->dir = direction;
        }
  
+ #ifdef CONFIG_DMA_API_DEBUG
+       {
+               struct scatterlist *sg;
+               u64 addr;
+               int len;
+               int i;
+               for_each_sgtable_dma_sg(sg_table, sg, i) {
+                       addr = sg_dma_address(sg);
+                       len = sg_dma_len(sg);
+                       if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+                               pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+                                        __func__, addr, len);
+                       }
+               }
+       }
+ #endif /* CONFIG_DMA_API_DEBUG */
        return sg_table;
  }
  EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@@ -1188,68 -1207,72 +1209,72 @@@ EXPORT_SYMBOL_GPL(dma_buf_mmap)
   * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
   * address space. Same restrictions as for vmap and friends apply.
   * @dmabuf:   [in]    buffer to vmap
+  * @map:      [out]   returns the vmap pointer
   *
   * This call may fail due to lack of virtual mapping address space.
   * These calls are optional in drivers. The intended use for them
   * is for mapping objects linear in kernel space for high use objects.
   * Please attempt to use kmap/kunmap before thinking about these interfaces.
   *
-  * Returns NULL on error.
+  * Returns 0 on success, or a negative errno code otherwise.
   */
void *dma_buf_vmap(struct dma_buf *dmabuf)
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
  {
-       void *ptr;
+       struct dma_buf_map ptr;
+       int ret = 0;
+       dma_buf_map_clear(map);
  
        if (WARN_ON(!dmabuf))
-               return NULL;
+               return -EINVAL;
  
        if (!dmabuf->ops->vmap)
-               return NULL;
+               return -EINVAL;
  
        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
-               BUG_ON(!dmabuf->vmap_ptr);
-               ptr = dmabuf->vmap_ptr;
+               BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
+               *map = dmabuf->vmap_ptr;
                goto out_unlock;
        }
  
-       BUG_ON(dmabuf->vmap_ptr);
+       BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
  
-       ptr = dmabuf->ops->vmap(dmabuf);
-       if (WARN_ON_ONCE(IS_ERR(ptr)))
-               ptr = NULL;
-       if (!ptr)
+       ret = dmabuf->ops->vmap(dmabuf, &ptr);
+       if (WARN_ON_ONCE(ret))
                goto out_unlock;
  
        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;
  
+       *map = dmabuf->vmap_ptr;
  out_unlock:
        mutex_unlock(&dmabuf->lock);
-       return ptr;
+       return ret;
  }
  EXPORT_SYMBOL_GPL(dma_buf_vmap);
  
  /**
   * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
   * @dmabuf:   [in]    buffer to vunmap
-  * @vaddr:    [in]    vmap to vunmap
+  * @map:      [in]    vmap pointer to vunmap
   */
- void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
  {
        if (WARN_ON(!dmabuf))
                return;
  
-       BUG_ON(!dmabuf->vmap_ptr);
+       BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
        BUG_ON(dmabuf->vmapping_counter == 0);
-       BUG_ON(dmabuf->vmap_ptr != vaddr);
+       BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
  
        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
-                       dmabuf->ops->vunmap(dmabuf, vaddr);
-               dmabuf->vmap_ptr = NULL;
+                       dmabuf->ops->vunmap(dmabuf, map);
+               dma_buf_map_clear(&dmabuf->vmap_ptr);
        }
        mutex_unlock(&dmabuf->lock);
  }
@@@ -140,12 -140,13 +140,12 @@@ struct sg_table *dma_heap_map_dma_buf(s
                                      enum dma_data_direction direction)
  {
        struct dma_heaps_attachment *a = attachment->priv;
 -      struct sg_table *table;
 -
 -      table = &a->table;
 +      struct sg_table *table = &a->table;
 +      int ret;
  
 -      if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
 -                      direction))
 -              table = ERR_PTR(-ENOMEM);
 +      ret = dma_map_sgtable(attachment->dev, table, direction, 0);
 +      if (ret)
 +              table = ERR_PTR(ret);
        return table;
  }
  
@@@ -153,7 -154,7 +153,7 @@@ static void dma_heap_unmap_dma_buf(stru
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
  {
 -      dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 +      dma_unmap_sgtable(attachment->dev, table, direction, 0);
  }
  
  static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
@@@ -235,7 -236,7 +235,7 @@@ static int dma_heap_dma_buf_end_cpu_acc
        return 0;
  }
  
- static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+ static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
  {
        struct heap_helper_buffer *buffer = dmabuf->priv;
        void *vaddr;
        vaddr = dma_heap_buffer_vmap_get(buffer);
        mutex_unlock(&buffer->lock);
  
-       return vaddr;
+       if (!vaddr)
+               return -ENOMEM;
+       dma_buf_map_set_vaddr(map, vaddr);
+       return 0;
  }
  
- static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+ static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
  {
        struct heap_helper_buffer *buffer = dmabuf->priv;
  
@@@ -996,7 -996,7 +996,7 @@@ create_evict_fence_fail
        return ret;
  }
  
 -int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
 +int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
                                          void **vm, void **process_info,
                                          struct dma_fence **ef)
  {
@@@ -1032,7 -1032,7 +1032,7 @@@ amdgpu_vm_init_fail
  }
  
  int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 -                                         struct file *filp, unsigned int pasid,
 +                                         struct file *filp, u32 pasid,
                                           void **vm, void **process_info,
                                           struct dma_fence **ef)
  {
@@@ -1479,7 -1479,7 +1479,7 @@@ int amdgpu_amdkfd_gpuvm_map_memory_to_g
                }
        }
  
-       if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+       if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
                amdgpu_bo_fence(bo,
                                &avm->process_info->eviction_fence->base,
                                true);
@@@ -1558,7 -1558,8 +1558,8 @@@ int amdgpu_amdkfd_gpuvm_unmap_memory_fr
         * required.
         */
        if (mem->mapped_to_gpu_memory == 0 &&
-           !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+           !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
+           !mem->bo->tbo.pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                                process_info->eviction_fence);
  
@@@ -267,7 -267,7 +267,7 @@@ static int  amdgpu_debugfs_process_reg_
                } else {
                        r = get_user(value, (uint32_t *)buf);
                        if (!r)
 -                              amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
 +                              amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
                }
                if (r) {
                        result = r;
@@@ -1319,6 -1319,7 +1319,7 @@@ static int amdgpu_debugfs_evict_gtt(str
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
+       struct ttm_resource_manager *man;
        int r;
  
        r = pm_runtime_get_sync(dev->dev);
                return r;
        }
  
-       seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+       man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+       r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+       seq_printf(m, "(%d)\n", r);
  
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
@@@ -132,10 -132,7 +132,7 @@@ static void amdgpu_display_unpin_work_f
        /* unpin of the old buffer */
        r = amdgpu_bo_reserve(work->old_abo, true);
        if (likely(r == 0)) {
-               r = amdgpu_bo_unpin(work->old_abo);
-               if (unlikely(r != 0)) {
-                       DRM_ERROR("failed to unpin buffer after flip\n");
-               }
+               amdgpu_bo_unpin(work->old_abo);
                amdgpu_bo_unreserve(work->old_abo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
@@@ -249,8 -246,7 +246,7 @@@ pflip_cleanup
        }
  unpin:
        if (!adev->enable_virtual_display)
-               if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
-                       DRM_ERROR("failed to unpin new abo in error path\n");
+               amdgpu_bo_unpin(new_abo);
  
  unreserve:
        amdgpu_bo_unreserve(new_abo);
@@@ -297,7 -293,7 +293,7 @@@ int amdgpu_display_crtc_set_config(stru
           take the current one */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;
 -              goto out;
 +              return ret;
        }
        /* if we have no active crtcs, then drop the power ref
           we got before */
@@@ -32,6 -32,7 +32,6 @@@
  #include <drm/drm_pciids.h>
  #include <linux/console.h>
  #include <linux/module.h>
 -#include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include <linux/vga_switcheroo.h>
  #include <drm/drm_probe_helper.h>
@@@ -147,7 -148,7 +147,7 @@@ int amdgpu_async_gfx_ring = 1
  int amdgpu_mcbp = 0;
  int amdgpu_discovery = -1;
  int amdgpu_mes = 0;
 -int amdgpu_noretry;
 +int amdgpu_noretry = -1;
  int amdgpu_force_asic_type = -1;
  int amdgpu_tmz = 0;
  int amdgpu_reset_method = -1; /* auto */
@@@ -596,13 -597,8 +596,13 @@@ MODULE_PARM_DESC(mes
        "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
  module_param_named(mes, amdgpu_mes, int, 0444);
  
 +/**
 + * DOC: noretry (int)
 + * Disable retry faults in the GPU memory controller.
 + * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
 + */
  MODULE_PARM_DESC(noretry,
 -      "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
 +      "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
  module_param_named(noretry, amdgpu_noretry, int, 0644);
  
  /**
@@@ -1066,7 -1062,6 +1066,7 @@@ static const struct pci_device_id pciid
        {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
        {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
        {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
        {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
        /* Navi14 */
        {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
  
        /* Navi12 */
 -      {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 -      {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 +      {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
 +      {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
 +
 +      /* Sienna_Cichlid */
 +      {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 +      {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
  
        {0, 0, 0}
  };
@@@ -1115,16 -1102,6 +1115,16 @@@ static int amdgpu_pci_probe(struct pci_
                return -ENODEV;
        }
  
 +      /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
 +       * however, SME requires an indirect IOMMU mapping because the encryption
 +       * bit is beyond the DMA mask of the chip.
 +       */
 +      if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 +              dev_info(&pdev->dev,
 +                       "SME is not compatible with RAVEN\n");
 +              return -ENOTSUPP;
 +      }
 +
  #ifdef CONFIG_DRM_AMDGPU_SI
        if (!amdgpu_si_support) {
                switch (flags & AMD_ASIC_MASK) {
@@@ -1331,7 -1308,7 +1331,7 @@@ static int amdgpu_pmops_runtime_suspend
                if (amdgpu_is_atpx_hybrid()) {
                        pci_ignore_hotplug(pdev);
                } else {
 -                      pci_save_state(pdev);
 +                      amdgpu_device_cache_pci_state(pdev);
                        pci_disable_device(pdev);
                        pci_ignore_hotplug(pdev);
                        pci_set_power_state(pdev, PCI_D3cold);
@@@ -1364,7 -1341,7 +1364,7 @@@ static int amdgpu_pmops_runtime_resume(
                        pci_set_master(pdev);
                } else {
                        pci_set_power_state(pdev, PCI_D0);
 -                      pci_restore_state(pdev);
 +                      amdgpu_device_load_pci_state(pdev);
                        ret = pci_enable_device(pdev);
                        if (ret)
                                return ret;
@@@ -1520,19 -1497,13 +1520,13 @@@ static struct drm_driver kms_driver = 
        .lastclose = amdgpu_driver_lastclose_kms,
        .irq_handler = amdgpu_irq_handler,
        .ioctls = amdgpu_ioctls_kms,
-       .gem_free_object_unlocked = amdgpu_gem_object_free,
-       .gem_open_object = amdgpu_gem_object_open,
-       .gem_close_object = amdgpu_gem_object_close,
        .dumb_create = amdgpu_mode_dumb_create,
        .dumb_map_offset = amdgpu_mode_dumb_mmap,
        .fops = &amdgpu_driver_kms_fops,
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = amdgpu_gem_prime_export,
        .gem_prime_import = amdgpu_gem_prime_import,
-       .gem_prime_vmap = amdgpu_gem_prime_vmap,
-       .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
        .gem_prime_mmap = amdgpu_gem_prime_mmap,
  
        .name = DRIVER_NAME,
        .patchlevel = KMS_DRIVER_PATCHLEVEL,
  };
  
 +static struct pci_error_handlers amdgpu_pci_err_handler = {
 +      .error_detected = amdgpu_pci_error_detected,
 +      .mmio_enabled   = amdgpu_pci_mmio_enabled,
 +      .slot_reset     = amdgpu_pci_slot_reset,
 +      .resume         = amdgpu_pci_resume,
 +};
 +
  static struct pci_driver amdgpu_kms_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .remove = amdgpu_pci_remove,
        .shutdown = amdgpu_pci_shutdown,
        .driver.pm = &amdgpu_pm_ops,
 +      .err_handler = &amdgpu_pci_err_handler,
  };
  
  static int __init amdgpu_init(void)
  
  #include "amdgpu.h"
  #include "amdgpu_display.h"
+ #include "amdgpu_dma_buf.h"
  #include "amdgpu_xgmi.h"
  
- void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
  {
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
  
@@@ -87,6 -90,7 +90,7 @@@ retry
                return r;
        }
        *obj = &bo->tbo.base;
+       (*obj)->funcs = &amdgpu_gem_object_funcs;
  
        return 0;
  }
@@@ -119,8 -123,8 +123,8 @@@ void amdgpu_gem_force_release(struct am
   * Call from drm_gem_handle_create which appear in both new and open ioctl
   * case.
   */
- int amdgpu_gem_object_open(struct drm_gem_object *obj,
-                          struct drm_file *file_priv)
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+                                 struct drm_file *file_priv)
  {
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        return 0;
  }
  
- void amdgpu_gem_object_close(struct drm_gem_object *obj,
-                            struct drm_file *file_priv)
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+                                   struct drm_file *file_priv)
  {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@@ -211,6 -215,15 +215,15 @@@ out_unlock
        ttm_eu_backoff_reservation(&ticket, &list);
  }
  
+ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+       .free = amdgpu_gem_object_free,
+       .open = amdgpu_gem_object_open,
+       .close = amdgpu_gem_object_close,
+       .export = amdgpu_gem_prime_export,
+       .vmap = amdgpu_gem_prime_vmap,
+       .vunmap = amdgpu_gem_prime_vunmap,
+ };
  /*
   * GEM ioctls.
   */
@@@ -596,7 -609,6 +609,7 @@@ int amdgpu_gem_va_ioctl(struct drm_devi
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint64_t va_flags;
 +      uint64_t vm_size;
        int r = 0;
  
        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
  
        args->va_address &= AMDGPU_GMC_HOLE_MASK;
  
 +      vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 +      vm_size -= AMDGPU_VA_RESERVED_SIZE;
 +      if (args->va_address + args->map_size > vm_size) {
 +              dev_dbg(&dev->pdev->dev,
 +                      "va_address 0x%llx is in top reserved area 0x%llx\n",
 +                      args->va_address + args->map_size, vm_size);
 +              return -EINVAL;
 +      }
 +
        if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
                dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                        args->flags);
@@@ -870,7 -873,7 +883,7 @@@ static int amdgpu_debugfs_gem_bo_info(i
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);
  
-       pin_count = READ_ONCE(bo->pin_count);
+       pin_count = READ_ONCE(bo->tbo.pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
  
@@@ -45,12 -45,10 +45,10 @@@ void amdgpu_gmc_get_pde_for_bo(struct a
                               uint64_t *addr, uint64_t *flags)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct ttm_dma_tt *ttm;
  
        switch (bo->tbo.mem.mem_type) {
        case TTM_PL_TT:
-               ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-               *addr = ttm->dma_address[0];
+               *addr = bo->tbo.ttm->dma_address[0];
                break;
        case TTM_PL_VRAM:
                *addr = amdgpu_bo_gpu_offset(bo);
@@@ -122,16 -120,14 +120,14 @@@ int amdgpu_gmc_set_pte_pde(struct amdgp
  uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-       struct ttm_dma_tt *ttm;
  
-       if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+       if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
                return AMDGPU_BO_INVALID_OFFSET;
  
-       ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-       if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+       if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
                return AMDGPU_BO_INVALID_OFFSET;
  
-       return adev->gmc.agp_start + ttm->dma_address[0];
+       return adev->gmc.agp_start + bo->ttm->dma_address[0];
  }
  
  /**
@@@ -413,44 -409,6 +409,44 @@@ void amdgpu_gmc_tmz_set(struct amdgpu_d
        }
  }
  
 +/**
 + * amdgpu_noretry_set -- set per asic noretry defaults
 + * @adev: amdgpu_device pointer
 + *
 + * Set a per asic default for the no-retry parameter.
 + *
 + */
 +void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_gmc *gmc = &adev->gmc;
 +
 +      switch (adev->asic_type) {
 +      case CHIP_RAVEN:
 +              /* Raven currently has issues with noretry
 +               * regardless of what we decide for other
 +               * asics, we should leave raven with
 +               * noretry = 0 until we root cause the
 +               * issues.
 +               */
 +              if (amdgpu_noretry == -1)
 +                      gmc->noretry = 0;
 +              else
 +                      gmc->noretry = amdgpu_noretry;
 +              break;
 +      default:
 +              /* default this to 0 for now, but we may want
 +               * to change this in the future for certain
 +               * GPUs as it can increase performance in
 +               * certain cases.
 +               */
 +              if (amdgpu_noretry == -1)
 +                      gmc->noretry = 0;
 +              else
 +                      gmc->noretry = amdgpu_noretry;
 +              break;
 +      }
 +}
 +
  void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
                                   bool enable)
  {
@@@ -66,6 -66,8 +66,8 @@@
  static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem);
+ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+                                     struct ttm_tt *ttm);
  
  static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
                                    unsigned int type,
@@@ -92,7 -94,7 +94,7 @@@ static void amdgpu_evict_flags(struct t
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_MASK_CACHING
+               .flags = 0
        };
  
        /* Don't handle scatter gather BOs */
@@@ -292,11 -294,9 +294,9 @@@ static int amdgpu_ttm_map_buffer(struc
        cpu_addr = &job->ibs[0].ptr[num_dw];
  
        if (mem->mem_type == TTM_PL_TT) {
-               struct ttm_dma_tt *dma;
                dma_addr_t *dma_address;
  
-               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
                r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
                                    cpu_addr);
                if (r)
@@@ -538,19 -538,13 +538,13 @@@ static int amdgpu_move_vram_ram(struct 
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
+       placements.flags = 0;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit from VRAM\n");
                return r;
        }
  
-       /* set caching flags */
-       r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
-       if (unlikely(r)) {
-               goto out_cleanup;
-       }
        r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
        if (unlikely(r))
                goto out_cleanup;
                goto out_cleanup;
        }
  
-       /* move BO (in tmp_mem) to new_mem */
-       r = ttm_bo_move_ttm(bo, ctx, new_mem);
+       r = ttm_bo_wait_ctx(bo, ctx);
+       if (unlikely(r))
+               goto out_cleanup;
+       amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+       ttm_resource_free(bo, &bo->mem);
+       ttm_bo_assign_mem(bo, new_mem);
  out_cleanup:
        ttm_resource_free(bo, &tmp_mem);
        return r;
@@@ -599,7 -598,7 +598,7 @@@ static int amdgpu_move_ram_vram(struct 
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.mem_type = TTM_PL_TT;
-       placements.flags = TTM_PL_MASK_CACHING;
+       placements.flags = 0;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
        if (unlikely(r)) {
                pr_err("Failed to find GTT space for blit to VRAM\n");
        }
  
        /* move/bind old memory to GTT space */
-       r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+       r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+       if (unlikely(r))
+               return r;
+       r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
  
+       ttm_bo_assign_mem(bo, &tmp_mem);
        /* copy to VRAM */
        r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
        if (unlikely(r)) {
@@@ -660,9 -664,17 +664,17 @@@ static int amdgpu_bo_move(struct ttm_bu
        struct ttm_resource *old_mem = &bo->mem;
        int r;
  
+       if (new_mem->mem_type == TTM_PL_TT) {
+               r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+               if (r)
+                       return r;
+       }
+       amdgpu_bo_move_notify(bo, evict, new_mem);
        /* Can't move a pinned BO */
        abo = ttm_to_amdgpu_bo(bo);
-       if (WARN_ON_ONCE(abo->pin_count > 0))
+       if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
                return -EINVAL;
  
        adev = amdgpu_ttm_adev(bo->bdev);
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
-       if ((old_mem->mem_type == TTM_PL_TT &&
-            new_mem->mem_type == TTM_PL_SYSTEM) ||
-           (old_mem->mem_type == TTM_PL_SYSTEM &&
-            new_mem->mem_type == TTM_PL_TT)) {
-               /* bind is enough */
+       if (old_mem->mem_type == TTM_PL_SYSTEM &&
+           new_mem->mem_type == TTM_PL_TT) {
                ttm_bo_move_null(bo, new_mem);
                return 0;
        }
+       if (old_mem->mem_type == TTM_PL_TT &&
+           new_mem->mem_type == TTM_PL_SYSTEM) {
+               r = ttm_bo_wait_ctx(bo, ctx);
+               if (r)
+                       goto fail;
+               amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_mem);
+               return 0;
+       }
        if (old_mem->mem_type == AMDGPU_PL_GDS ||
            old_mem->mem_type == AMDGPU_PL_GWS ||
            old_mem->mem_type == AMDGPU_PL_OA ||
@@@ -712,12 -734,12 +734,12 @@@ memcpy
                if (!amdgpu_mem_visible(adev, old_mem) ||
                    !amdgpu_mem_visible(adev, new_mem)) {
                        pr_err("Move buffer fallback to memcpy unavailable\n");
-                       return r;
+                       goto fail;
                }
  
                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (r)
-                       return r;
+                       goto fail;
        }
  
        if (bo->type == ttm_bo_type_device &&
        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
        return 0;
+ fail:
+       swap(*new_mem, bo->mem);
+       amdgpu_bo_move_notify(bo, false, new_mem);
+       swap(*new_mem, bo->mem);
+       return r;
  }
  
  /**
@@@ -767,6 -794,7 +794,7 @@@ static int amdgpu_ttm_io_mem_reserve(st
  
                mem->bus.offset += adev->gmc.aper_base;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_write_combined;
                break;
        default:
                return -EINVAL;
@@@ -811,7 -839,7 +839,7 @@@ uint64_t amdgpu_ttm_domain_start(struc
   * TTM backend functions.
   */
  struct amdgpu_ttm_tt {
-       struct ttm_dma_tt       ttm;
+       struct ttm_tt   ttm;
        struct drm_gem_object   *gobj;
        u64                     offset;
        uint64_t                userptr;
@@@ -943,7 -971,7 +971,7 @@@ bool amdgpu_ttm_tt_get_user_pages_done(
        if (!gtt || !gtt->userptr)
                return false;
  
-       DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+       DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
                gtt->userptr, ttm->num_pages);
  
        WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@@@ -1016,7 -1044,6 +1044,7 @@@ static int amdgpu_ttm_tt_pin_userptr(st
  
  release_sg:
        kfree(ttm->sg);
 +      ttm->sg = NULL;
        return r;
  }
  
@@@ -1095,7 -1122,7 +1123,7 @@@ static int amdgpu_ttm_gart_bind(struct 
  
  gart_bind_fail:
        if (r)
-               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
  
        return r;
@@@ -1130,7 -1157,7 +1158,7 @@@ static int amdgpu_ttm_backend_bind(stru
                }
        }
        if (!ttm->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+               WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
  
                ttm->pages, gtt->ttm.dma_address, flags);
  
        if (r)
-               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
        gtt->bound = true;
        return r;
  }
  
  /**
 - * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 + * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 + * through AGP or GART aperture.
 + *
 + * If bo is accessible through AGP aperture, then use AGP aperture
 + * to access bo; otherwise allocate logical space in GART aperture
 + * and map bo to GART aperture.
   */
  int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
  {
@@@ -1267,8 -1289,8 +1295,8 @@@ static void amdgpu_ttm_backend_unbind(s
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        if (r)
-               DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
-                         gtt->ttm.ttm.num_pages, gtt->offset);
+               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+                         gtt->ttm.num_pages, gtt->offset);
        gtt->bound = false;
  }
  
@@@ -1282,7 -1304,7 +1310,7 @@@ static void amdgpu_ttm_backend_destroy(
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
  
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
  }
  
  static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
  {
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_ttm_tt *gtt;
+       enum ttm_caching caching;
  
        gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
        }
        gtt->gobj = &bo->base;
  
+       if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+               caching = ttm_write_combined;
+       else
+               caching = ttm_cached;
        /* allocate space for the uninitialized page entries */
-       if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+       if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
  }
  
  /**
@@@ -1332,7 -1361,6 +1367,6 @@@ static int amdgpu_ttm_tt_populate(struc
                        return -ENOMEM;
  
                ttm->page_flags |= TTM_PAGE_FLAG_SG;
-               ttm_tt_set_populated(ttm);
                return 0;
        }
  
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address,
                                                 ttm->num_pages);
-               ttm_tt_set_populated(ttm);
                return 0;
        }
  
@@@ -1478,7 -1505,7 +1511,7 @@@ bool amdgpu_ttm_tt_affect_userptr(struc
        /* Return false if no part of the ttm_tt object lies within
         * the range
         */
-       size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+       size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;
  
@@@ -1529,7 -1556,7 +1562,7 @@@ uint64_t amdgpu_ttm_tt_pde_flags(struc
        if (mem && mem->mem_type == TTM_PL_TT) {
                flags |= AMDGPU_PTE_SYSTEM;
  
-               if (ttm->caching_state == tt_cached)
+               if (ttm->caching == ttm_cached)
                        flags |= AMDGPU_PTE_SNOOPED;
        }
  
@@@ -1699,20 -1726,23 +1732,23 @@@ static int amdgpu_ttm_access_memory(str
        return ret;
  }
  
+ static void
+ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+       amdgpu_bo_move_notify(bo, false, NULL);
+ }
  static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-       .ttm_tt_bind = &amdgpu_ttm_backend_bind,
-       .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
        .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
        .verify_access = &amdgpu_verify_access,
-       .move_notify = &amdgpu_bo_move_notify,
+       .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
        .release_notify = &amdgpu_bo_release_notify,
-       .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
        .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
        .access_memory = &amdgpu_ttm_access_memory,
@@@ -2092,15 -2122,48 +2128,48 @@@ void amdgpu_ttm_set_buffer_funcs_status
        adev->mman.buffer_funcs_enabled = enable;
  }
  
+ static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+ {
+       struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+       vm_fault_t ret;
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               return ret;
+       ret = amdgpu_bo_fault_reserve_notify(bo);
+       if (ret)
+               goto unlock;
+       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                      TTM_BO_VM_NUM_PREFAULT, 1);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               return ret;
+ unlock:
+       dma_resv_unlock(bo->base.resv);
+       return ret;
+ }
+ static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+       .fault = amdgpu_ttm_fault,
+       .open = ttm_bo_vm_open,
+       .close = ttm_bo_vm_close,
+       .access = ttm_bo_vm_access
+ };
  int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
  {
        struct drm_file *file_priv = filp->private_data;
        struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+       int r;
  
-       if (adev == NULL)
-               return -EINVAL;
+       r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+       if (unlikely(r != 0))
+               return r;
  
-       return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+       vma->vm_ops = &amdgpu_ttm_vm_ops;
+       return 0;
  }
  
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
@@@ -609,7 -609,7 +609,7 @@@ void amdgpu_vm_del_from_lru_notify(stru
        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;
  
-       if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+       if (bo->pin_count)
                return;
  
        abo = ttm_to_amdgpu_bo(bo);
@@@ -1502,8 -1502,6 +1502,8 @@@ static int amdgpu_vm_update_ptes(struc
  
                        pt = cursor.entry->base.bo;
                        shift = parent_shift;
 +                      frag_end = max(frag_end, ALIGN(frag_start + 1,
 +                                 1ULL << shift));
                }
  
                /* Looks good so far, calculate parameters for the update */
                entry_end = min(entry_end, end);
  
                do {
 +                      struct amdgpu_vm *vm = params->vm;
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;
 +                      uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
  
                        /* This can happen when we set higher level PDs to
                         * silent to stop fault floods.
                         */
                        nptes = max(nptes, 1u);
 +
 +                      trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
 +                                                  nptes, dst, incr, upd_flags,
 +                                                  vm->task_info.pid,
 +                                                  vm->immediate.fence_context);
                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
 -                                             flags | AMDGPU_PTE_FRAG(frag));
 +                                             upd_flags);
  
                        pe_start += nptes * 8;
 -                      dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
 +                      dst += nptes * incr;
  
                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
@@@ -1790,7 -1781,6 +1790,6 @@@ int amdgpu_vm_bo_update(struct amdgpu_d
                resv = vm->root.base.bo->tbo.base.resv;
        } else {
                struct drm_gem_object *obj = &bo->tbo.base;
-               struct ttm_dma_tt *ttm;
  
                resv = bo->tbo.base.resv;
                if (obj->import_attach && bo_va->is_xgmi) {
                }
                mem = &bo->tbo.mem;
                nodes = mem->mm_node;
-               if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-                       pages_addr = ttm->dma_address;
-               }
+               if (mem->mem_type == TTM_PL_TT)
+                       pages_addr = bo->tbo.ttm->dma_address;
        }
  
        if (bo) {
@@@ -2807,7 -2795,7 +2804,7 @@@ long amdgpu_vm_wait_idle(struct amdgpu_
   * 0 for success, error for failure.
   */
  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 -                 int vm_context, unsigned int pasid)
 +                 int vm_context, u32 pasid)
  {
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *root;
@@@ -2978,7 -2966,7 +2975,7 @@@ static int amdgpu_vm_check_clean_reserv
   * 0 for success, -errno for errors.
   */
  int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 -                         unsigned int pasid)
 +                         u32 pasid)
  {
        bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
        int r;
@@@ -3276,7 -3264,7 +3273,7 @@@ int amdgpu_vm_ioctl(struct drm_device *
   * @pasid: PASID identifier for VM
   * @task_info: task_info to fill.
   */
 -void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 +void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                         struct amdgpu_task_info *task_info)
  {
        struct amdgpu_vm *vm;
@@@ -3320,7 -3308,7 +3317,7 @@@ void amdgpu_vm_set_task_info(struct amd
   * Try to gracefully handle a VM fault. Return true if the fault was handled and
   * shouldn't be reported any more.
   */
 -bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
 +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            uint64_t addr)
  {
        struct amdgpu_bo *root;
@@@ -25,7 -25,7 +25,7 @@@ static vm_fault_t armada_gem_vm_fault(s
        return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
  }
  
- const struct vm_operations_struct armada_gem_vm_ops = {
static const struct vm_operations_struct armada_gem_vm_ops = {
        .fault  = armada_gem_vm_fault,
        .open   = drm_gem_vm_open,
        .close  = drm_gem_vm_close,
@@@ -184,6 -184,12 +184,12 @@@ armada_gem_map_object(struct drm_devic
        return dobj->addr;
  }
  
+ static const struct drm_gem_object_funcs armada_gem_object_funcs = {
+       .free = armada_gem_free_object,
+       .export = armada_gem_prime_export,
+       .vm_ops = &armada_gem_vm_ops,
+ };
  struct armada_gem_object *
  armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
  {
        if (!obj)
                return NULL;
  
+       obj->obj.funcs = &armada_gem_object_funcs;
        drm_gem_private_object_init(dev, &obj->obj, size);
  
        DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@@ -214,6 -222,8 +222,8 @@@ static struct armada_gem_object *armada
        if (!obj)
                return NULL;
  
+       obj->obj.funcs = &armada_gem_object_funcs;
        if (drm_gem_object_init(dev, &obj->obj, size)) {
                kfree(obj);
                return NULL;
@@@ -379,7 -389,7 +389,7 @@@ armada_gem_prime_map_dma_buf(struct dma
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
 -      int i, num;
 +      int i;
  
        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
  
                mapping = dobj->obj.filp->f_mapping;
  
 -              for_each_sg(sgt->sgl, sg, count, i) {
 +              for_each_sgtable_sg(sgt, sg, i) {
                        struct page *page;
  
                        page = shmem_read_mapping_page(mapping, i);
 -                      if (IS_ERR(page)) {
 -                              num = i;
 +                      if (IS_ERR(page))
                                goto release;
 -                      }
  
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }
  
 -              if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
 -                      num = sgt->nents;
 +              if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto release;
 -              }
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
  
                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
  
 -              if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
 +              if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
        return sgt;
  
   release:
 -      for_each_sg(sgt->sgl, sg, num, i)
 -              put_page(sg_page(sg));
 +      for_each_sgtable_sg(sgt, sg, i)
 +              if (sg_page(sg))
 +                      put_page(sg_page(sg));
   free_table:
        sg_free_table(sgt);
   free_sgt:
@@@ -446,12 -459,11 +456,12 @@@ static void armada_gem_prime_unmap_dma_
        int i;
  
        if (!dobj->linear)
 -              dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 +              dma_unmap_sgtable(attach->dev, sgt, dir, 0);
  
        if (dobj->obj.filp) {
                struct scatterlist *sg;
 -              for_each_sg(sgt->sgl, sg, sgt->nents, i)
 +
 +              for_each_sgtable_sg(sgt, sg, i)
                        put_page(sg_page(sg));
        }
  
   * OF THIS SOFTWARE.
   */
  
 +#include <linux/bitfield.h>
  #include <linux/delay.h>
  #include <linux/errno.h>
  #include <linux/i2c.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
 +#include <linux/random.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/iopoll.h>
@@@ -425,22 -423,6 +425,22 @@@ drm_dp_encode_sideband_req(const struc
                memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                idx += req->u.i2c_write.num_bytes;
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS: {
 +              const struct drm_dp_query_stream_enc_status *msg;
 +
 +              msg = &req->u.enc_status;
 +              buf[idx] = msg->stream_id;
 +              idx++;
 +              memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
 +              idx += sizeof(msg->client_id);
 +              buf[idx] = 0;
 +              buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
 +              buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
 +              buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
 +              buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
 +              idx++;
 +              }
 +              break;
        }
        raw->cur_len = idx;
  }
@@@ -569,20 -551,6 +569,20 @@@ drm_dp_decode_sideband_req(const struc
                                return -ENOMEM;
                }
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              req->u.enc_status.stream_id = buf[idx++];
 +              for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
 +                      req->u.enc_status.client_id[i] = buf[idx++];
 +
 +              req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
 +                                                         buf[idx]);
 +              req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
 +                                                               buf[idx]);
 +              req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
 +                                                            buf[idx]);
 +              req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
 +                                                                  buf[idx]);
 +              break;
        }
  
        return 0;
@@@ -661,16 -629,6 +661,16 @@@ drm_dp_dump_sideband_msg_req_body(cons
                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
                  req->u.i2c_write.bytes);
                break;
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              P("stream_id=%u client_id=%*ph stream_event=%x "
 +                "valid_event=%d stream_behavior=%x valid_behavior=%d",
 +                req->u.enc_status.stream_id,
 +                (int)ARRAY_SIZE(req->u.enc_status.client_id),
 +                req->u.enc_status.client_id, req->u.enc_status.stream_event,
 +                req->u.enc_status.valid_stream_event,
 +                req->u.enc_status.stream_behavior,
 +                req->u.enc_status.valid_stream_behavior);
 +              break;
        default:
                P("???\n");
                break;
@@@ -978,42 -936,6 +978,42 @@@ static bool drm_dp_sideband_parse_power
        return true;
  }
  
 +static bool
 +drm_dp_sideband_parse_query_stream_enc_status(
 +                              struct drm_dp_sideband_msg_rx *raw,
 +                              struct drm_dp_sideband_msg_reply_body *repmsg)
 +{
 +      struct drm_dp_query_stream_enc_status_ack_reply *reply;
 +
 +      reply = &repmsg->u.enc_status;
 +
 +      reply->stream_id = raw->msg[3];
 +
 +      reply->reply_signed = raw->msg[2] & BIT(0);
 +
 +      /*
 +       * NOTE: It's my impression from reading the spec that the below parsing
 +       * is correct. However I noticed while testing with an HDCP 1.4 display
 +       * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
 +       * would expect both bits to be set. So keep the parsing following the
 +       * spec, but beware reality might not match the spec (at least for some
 +       * configurations).
 +       */
 +      reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
 +      reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
 +
 +      reply->query_capable_device_present = raw->msg[2] & BIT(5);
 +      reply->legacy_device_present = raw->msg[2] & BIT(6);
 +      reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
 +
 +      reply->auth_completed = !!(raw->msg[1] & BIT(3));
 +      reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
 +      reply->repeater_present = !!(raw->msg[1] & BIT(5));
 +      reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
 +
 +      return true;
 +}
 +
  static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                        struct drm_dp_sideband_msg_reply_body *msg)
  {
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        case DP_CLEAR_PAYLOAD_ID_TABLE:
                return true; /* since there's nothing to parse */
 +      case DP_QUERY_STREAM_ENC_STATUS:
 +              return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
        default:
                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
@@@ -1203,25 -1123,6 +1203,25 @@@ static void build_power_updown_phy(stru
        msg->path_msg = true;
  }
  
 +static int
 +build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
 +                            u8 *q_id)
 +{
 +      struct drm_dp_sideband_msg_req_body req;
 +
 +      req.req_type = DP_QUERY_STREAM_ENC_STATUS;
 +      req.u.enc_status.stream_id = stream_id;
 +      memcpy(req.u.enc_status.client_id, q_id,
 +             sizeof(req.u.enc_status.client_id));
 +      req.u.enc_status.stream_event = 0;
 +      req.u.enc_status.valid_stream_event = false;
 +      req.u.enc_status.stream_behavior = 0;
 +      req.u.enc_status.valid_stream_behavior = false;
 +
 +      drm_dp_encode_sideband_req(&req, msg);
 +      return 0;
 +}
 +
  static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_vcpi *vcpi)
  {
@@@ -3254,57 -3155,6 +3254,57 @@@ int drm_dp_send_power_updown_phy(struc
  }
  EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
  
 +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
 +              struct drm_dp_mst_port *port,
 +              struct drm_dp_query_stream_enc_status_ack_reply *status)
 +{
 +      struct drm_dp_sideband_msg_tx *txmsg;
 +      u8 nonce[7];
 +      int len, ret;
 +
 +      txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 +      if (!txmsg)
 +              return -ENOMEM;
 +
 +      port = drm_dp_mst_topology_get_port_validated(mgr, port);
 +      if (!port) {
 +              ret = -EINVAL;
 +              goto out_get_port;
 +      }
 +
 +      get_random_bytes(nonce, sizeof(nonce));
 +
 +      /*
 +       * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
 +       *  transaction at the MST Branch device directly connected to the
 +       *  Source"
 +       */
 +      txmsg->dst = mgr->mst_primary;
 +
 +      len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
 +
 +      drm_dp_queue_down_tx(mgr, txmsg);
 +
 +      ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
 +      if (ret < 0) {
 +              goto out;
 +      } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
 +              drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
 +              ret = -ENXIO;
 +              goto out;
 +      }
 +
 +      ret = 0;
 +      memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
 +
 +out:
 +      drm_dp_mst_topology_put_port(port);
 +out_get_port:
 +      kfree(txmsg);
 +      return ret;
 +}
 +EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
 +
  static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
                                       int id,
                                       struct drm_dp_payload *payload)
@@@ -3686,9 -3536,10 +3686,10 @@@ int drm_dp_mst_topology_mgr_set_mst(str
                WARN_ON(mgr->mst_primary);
  
                /* get dpcd info */
-               ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
-               if (ret != DP_RECEIVER_CAP_SIZE) {
-                       DRM_DEBUG_KMS("failed to read DPCD\n");
+               ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
+               if (ret < 0) {
+                       drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
+                                   mgr->aux->name, ret);
                        goto out_unlock;
                }
  
@@@ -281,18 -281,12 +281,12 @@@ int drm_fb_helper_restore_fbdev_mode_un
  EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
  
  #ifdef CONFIG_MAGIC_SYSRQ
- /*
-  * restore fbcon display for all kms driver's using this helper, used for sysrq
-  * and panic handling.
-  */
- static bool drm_fb_helper_force_kernel_mode(void)
+ /* emergency restore, don't bother with error reporting */
+ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
  {
-       bool ret, error = false;
        struct drm_fb_helper *helper;
  
-       if (list_empty(&kernel_fb_helper_list))
-               return false;
+       mutex_lock(&kernel_fb_helper_lock);
        list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
                struct drm_device *dev = helper->dev;
  
                        continue;
  
                mutex_lock(&helper->lock);
-               ret = drm_client_modeset_commit_locked(&helper->client);
-               if (ret)
-                       error = true;
+               drm_client_modeset_commit_locked(&helper->client);
                mutex_unlock(&helper->lock);
        }
-       return error;
+       mutex_unlock(&kernel_fb_helper_lock);
  }
  
- static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
- {
-       bool ret;
-       ret = drm_fb_helper_force_kernel_mode();
-       if (ret == true)
-               DRM_ERROR("Failed to restore crtc configuration\n");
- }
  static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
  
  static void drm_fb_helper_sysrq(int dummy1)
  
  static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
        .handler = drm_fb_helper_sysrq,
 -      .help_msg = "force-fb(V)",
 +      .help_msg = "force-fb(v)",
        .action_msg = "Restore framebuffer console",
  };
  #else
@@@ -247,12 -247,9 +247,9 @@@ drm_gem_object_release_handle(int id, v
  {
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
-       struct drm_device *dev = obj->dev;
  
-       if (obj->funcs && obj->funcs->close)
+       if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);
-       else if (dev->driver->gem_close_object)
-               dev->driver->gem_close_object(obj, file_priv);
  
        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);
@@@ -403,14 -400,10 +400,10 @@@ drm_gem_handle_create_tail(struct drm_f
        if (ret)
                goto err_remove;
  
-       if (obj->funcs && obj->funcs->open) {
+       if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
-       } else if (dev->driver->gem_open_object) {
-               ret = dev->driver->gem_open_object(obj, file_priv);
-               if (ret)
-                       goto err_revoke;
        }
  
        *handlep = handle;
@@@ -982,12 -975,11 +975,11 @@@ drm_gem_object_free(struct kref *kref
  {
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
-       struct drm_device *dev = obj->dev;
  
-       if (obj->funcs)
-               obj->funcs->free(obj);
-       else if (dev->driver->gem_free_object_unlocked)
-               dev->driver->gem_free_object_unlocked(obj);
+       if (WARN_ON(!obj->funcs->free))
+               return;
+       obj->funcs->free(obj);
  }
  EXPORT_SYMBOL(drm_gem_object_free);
  
@@@ -1049,9 -1041,9 +1041,9 @@@ EXPORT_SYMBOL(drm_gem_vm_close)
   * @obj_size: the object size to be mapped, in bytes
   * @vma: VMA for the area to be mapped
   *
-  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
-  * provided by the driver. Depending on their requirements, drivers can either
-  * provide a fault handler in their gem_vm_ops (in which case any accesses to
+  * Set up the VMA to prepare mapping of the GEM object using the GEM object's
+  * vm_ops. Depending on their requirements, GEM objects can either
+  * provide a fault handler in their vm_ops (in which case any accesses to
   * the object will be trapped, to perform migration, GTT binding, surface
   * register allocation, or performance monitoring), or mmap the buffer memory
   * synchronously after calling drm_gem_mmap_obj.
   * callers must verify access restrictions before calling this helper.
   *
   * Return 0 or success or -EINVAL if the object size is smaller than the VMA
-  * size, or if no gem_vm_ops are provided.
+  * size, or if no vm_ops are provided.
   */
  int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
  {
-       struct drm_device *dev = obj->dev;
        int ret;
  
        /* Check for valid size. */
         */
        drm_gem_object_get(obj);
  
-       if (obj->funcs && obj->funcs->mmap) {
 +      vma->vm_private_data = obj;
 +
+       if (obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret) {
                        drm_gem_object_put(obj);
                }
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
-               if (obj->funcs && obj->funcs->vm_ops)
+               if (obj->funcs->vm_ops)
                        vma->vm_ops = obj->funcs->vm_ops;
-               else if (dev->driver->gem_vm_ops)
-                       vma->vm_ops = dev->driver->gem_vm_ops;
                else {
                        drm_gem_object_put(obj);
                        return -EINVAL;
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }
  
 -      vma->vm_private_data = obj;
 -
        return 0;
  }
  EXPORT_SYMBOL(drm_gem_mmap_obj);
@@@ -1198,36 -1187,30 +1187,30 @@@ void drm_gem_print_info(struct drm_prin
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");
  
-       if (obj->funcs && obj->funcs->print_info)
+       if (obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
  }
  
  int drm_gem_pin(struct drm_gem_object *obj)
  {
-       if (obj->funcs && obj->funcs->pin)
+       if (obj->funcs->pin)
                return obj->funcs->pin(obj);
-       else if (obj->dev->driver->gem_prime_pin)
-               return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
  }
  
  void drm_gem_unpin(struct drm_gem_object *obj)
  {
-       if (obj->funcs && obj->funcs->unpin)
+       if (obj->funcs->unpin)
                obj->funcs->unpin(obj);
-       else if (obj->dev->driver->gem_prime_unpin)
-               obj->dev->driver->gem_prime_unpin(obj);
  }
  
  void *drm_gem_vmap(struct drm_gem_object *obj)
  {
        void *vaddr;
  
-       if (obj->funcs && obj->funcs->vmap)
+       if (obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
-       else if (obj->dev->driver->gem_prime_vmap)
-               vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);
  
@@@ -1242,10 -1225,8 +1225,8 @@@ void drm_gem_vunmap(struct drm_gem_obje
        if (!vaddr)
                return;
  
-       if (obj->funcs && obj->funcs->vunmap)
+       if (obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
-       else if (obj->dev->driver->gem_prime_vunmap)
-               obj->dev->driver->gem_prime_vunmap(obj, vaddr);
  }
  
  /**
@@@ -171,17 -171,16 +171,16 @@@ drm_gem_cma_create_with_handle(struct d
   * GEM object state and frees the memory used to store the object itself.
   * If the buffer is imported and the virtual address is set, it is released.
   * Drivers using the CMA helpers should set this as their
-  * &drm_driver.gem_free_object_unlocked callback.
+  * &drm_gem_object_funcs.free callback.
   */
  void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
  {
-       struct drm_gem_cma_object *cma_obj;
-       cma_obj = to_drm_gem_cma_obj(gem_obj);
+       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
  
        if (gem_obj->import_attach) {
                if (cma_obj->vaddr)
-                       dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
+                       dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        } else if (cma_obj->vaddr) {
                dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
@@@ -419,7 -418,7 +418,7 @@@ EXPORT_SYMBOL(drm_gem_cma_print_info)
   *
   * This function exports a scatter/gather table suitable for PRIME usage by
   * calling the standard DMA mapping API. Drivers using the CMA helpers should
-  * set this as their &drm_driver.gem_prime_get_sg_table callback.
+  * set this as their &drm_gem_object_funcs.get_sg_table callback.
   *
   * Returns:
   * A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@@ -471,9 -470,26 +470,9 @@@ drm_gem_cma_prime_import_sg_table(struc
  {
        struct drm_gem_cma_object *cma_obj;
  
 -      if (sgt->nents != 1) {
 -              /* check if the entries in the sg_table are contiguous */
 -              dma_addr_t next_addr = sg_dma_address(sgt->sgl);
 -              struct scatterlist *s;
 -              unsigned int i;
 -
 -              for_each_sg(sgt->sgl, s, sgt->nents, i) {
 -                      /*
 -                       * sg_dma_address(s) is only valid for entries
 -                       * that have sg_dma_len(s) != 0
 -                       */
 -                      if (!sg_dma_len(s))
 -                              continue;
 -
 -                      if (sg_dma_address(s) != next_addr)
 -                              return ERR_PTR(-EINVAL);
 -
 -                      next_addr = sg_dma_address(s) + sg_dma_len(s);
 -              }
 -      }
 +      /* check if the entries in the sg_table are contiguous */
 +      if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 +              return ERR_PTR(-EINVAL);
  
        /* Create a CMA GEM buffer. */
        cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
@@@ -525,7 -541,7 +524,7 @@@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mma
   * virtual address space. Since the CMA buffers are already mapped into the
   * kernel virtual address space this simply returns the cached virtual
   * address. Drivers using the CMA helpers should set this as their DRM
-  * driver's &drm_driver.gem_prime_vmap callback.
+  * driver's &drm_gem_object_funcs.vmap callback.
   *
   * Returns:
   * The kernel virtual address of the CMA GEM object's backing store.
@@@ -547,7 -563,7 +546,7 @@@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vma
   * This function removes a buffer exported via DRM PRIME from the kernel's
   * virtual address space. This is a no-op because CMA buffers cannot be
   * unmapped from kernel space. Drivers using the CMA helpers should set this
-  * as their &drm_driver.gem_prime_vunmap callback.
+  * as their &drm_gem_object_funcs.vunmap callback.
   */
  void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
  {
@@@ -617,22 -633,23 +616,23 @@@ drm_gem_cma_prime_import_sg_table_vmap(
  {
        struct drm_gem_cma_object *cma_obj;
        struct drm_gem_object *obj;
-       void *vaddr;
+       struct dma_buf_map map;
+       int ret;
  
-       vaddr = dma_buf_vmap(attach->dmabuf);
-       if (!vaddr) {
+       ret = dma_buf_vmap(attach->dmabuf, &map);
+       if (ret) {
                DRM_ERROR("Failed to vmap PRIME buffer\n");
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(ret);
        }
  
        obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
-               dma_buf_vunmap(attach->dmabuf, vaddr);
+               dma_buf_vunmap(attach->dmabuf, &map);
                return obj;
        }
  
        cma_obj = to_drm_gem_cma_obj(obj);
-       cma_obj->vaddr = vaddr;
+       cma_obj->vaddr = map.vaddr;
  
        return obj;
  }
@@@ -126,8 -126,8 +126,8 @@@ void drm_gem_shmem_free_object(struct d
                drm_prime_gem_destroy(obj, shmem->sgt);
        } else {
                if (shmem->sgt) {
 -                      dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
 -                                   shmem->sgt->nents, DMA_BIDIRECTIONAL);
 +                      dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
 +                                        DMA_BIDIRECTIONAL, 0);
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
@@@ -261,13 -261,16 +261,16 @@@ EXPORT_SYMBOL(drm_gem_shmem_unpin)
  static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
  {
        struct drm_gem_object *obj = &shmem->base;
-       int ret;
+       struct dma_buf_map map;
+       int ret = 0;
  
        if (shmem->vmap_use_count++ > 0)
                return shmem->vaddr;
  
        if (obj->import_attach) {
-               shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+               ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+               if (!ret)
+                       shmem->vaddr = map.vaddr;
        } else {
                pgprot_t prot = PAGE_KERNEL;
  
                        prot = pgprot_writecombine(prot);
                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, prot);
+               if (!shmem->vaddr)
+                       ret = -ENOMEM;
        }
  
-       if (!shmem->vaddr) {
-               DRM_DEBUG_KMS("Failed to vmap pages\n");
-               ret = -ENOMEM;
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
                goto err_put_pages;
        }
  
@@@ -333,6 -337,7 +337,7 @@@ EXPORT_SYMBOL(drm_gem_shmem_vmap)
  static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
  {
        struct drm_gem_object *obj = &shmem->base;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
  
        if (WARN_ON_ONCE(!shmem->vmap_use_count))
                return;
                return;
  
        if (obj->import_attach)
-               dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+               dma_buf_vunmap(obj->import_attach->dmabuf, &map);
        else
                vunmap(shmem->vaddr);
  
@@@ -424,7 -429,8 +429,7 @@@ void drm_gem_shmem_purge_locked(struct 
  
        WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
  
 -      dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
 -                   shmem->sgt->nents, DMA_BIDIRECTIONAL);
 +      dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(shmem->sgt);
        kfree(shmem->sgt);
        shmem->sgt = NULL;
@@@ -593,13 -599,8 +598,13 @@@ int drm_gem_shmem_mmap(struct drm_gem_o
        /* Remove the fake offset */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
  
 -      if (obj->import_attach)
 +      if (obj->import_attach) {
 +              /* Drop the reference drm_gem_mmap_obj() acquired. */
 +              drm_gem_object_put(obj);
 +              vma->vm_private_data = NULL;
 +
                return dma_buf_mmap(obj->dma_buf, vma, 0);
 +      }
  
        shmem = to_drm_gem_shmem_obj(obj);
  
@@@ -701,17 -702,12 +706,17 @@@ struct sg_table *drm_gem_shmem_get_page
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
 -      dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +      ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 +      if (ret)
 +              goto err_free_sgt;
  
        shmem->sgt = sgt;
  
        return sgt;
  
 +err_free_sgt:
 +      sg_free_table(sgt);
 +      kfree(sgt);
  err_put_pages:
        drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
@@@ -386,8 -386,6 +386,6 @@@ static struct dma_buf *export_and_regis
  
        if (obj->funcs && obj->funcs->export)
                dmabuf = obj->funcs->export(obj, flags);
-       else if (dev->driver->gem_prime_export)
-               dmabuf = dev->driver->gem_prime_export(obj, flags);
        else
                dmabuf = drm_gem_prime_export(obj, flags);
        if (IS_ERR(dmabuf)) {
   * This is the PRIME export function which must be used mandatorily by GEM
   * drivers to ensure correct lifetime management of the underlying GEM object.
   * The actual exporting from GEM object to a dma-buf is done through the
-  * &drm_driver.gem_prime_export driver callback.
+  * &drm_gem_object_funcs.export callback.
   */
  int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
@@@ -617,22 -615,22 +615,24 @@@ struct sg_table *drm_gem_map_dma_buf(st
  {
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;
 +      int ret;
  
        if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);
  
-       if (obj->funcs)
-               sgt = obj->funcs->get_sg_table(obj);
-       else
-               sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+       if (WARN_ON(!obj->funcs->get_sg_table))
+               return ERR_PTR(-ENOSYS);
+       sgt = obj->funcs->get_sg_table(obj);
+       if (IS_ERR(sgt))
+               return sgt;
  
 -      if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
 -                            DMA_ATTR_SKIP_CPU_SYNC)) {
 +      ret = dma_map_sgtable(attach->dev, sgt, dir,
 +                            DMA_ATTR_SKIP_CPU_SYNC);
 +      if (ret) {
                sg_free_table(sgt);
                kfree(sgt);
 -              sgt = ERR_PTR(-ENOMEM);
 +              sgt = ERR_PTR(ret);
        }
  
        return sgt;
@@@ -654,7 -652,8 +654,7 @@@ void drm_gem_unmap_dma_buf(struct dma_b
        if (!sgt)
                return;
  
 -      dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
 -                         DMA_ATTR_SKIP_CPU_SYNC);
 +      dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
  }
@@@ -663,38 -662,41 +663,41 @@@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf)
  /**
   * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
   * @dma_buf: buffer to be mapped
+  * @map: the virtual address of the buffer
   *
   * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
   * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
   *
   * Returns 0 on success or a negative error code on failure.
   */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct drm_gem_object *obj = dma_buf->priv;
        void *vaddr;
  
        vaddr = drm_gem_vmap(obj);
        if (IS_ERR(vaddr))
-               vaddr = NULL;
+               return PTR_ERR(vaddr);
  
-       return vaddr;
+       dma_buf_map_set_vaddr(map, vaddr);
+       return 0;
  }
  EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
  
  /**
   * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
   * @dma_buf: buffer to be unmapped
-  * @vaddr: the virtual address of the buffer
+  * @map: the virtual address of the buffer
   *
   * Releases a kernel virtual mapping. This can be used as the
   * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
   */
- void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct drm_gem_object *obj = dma_buf->priv;
  
-       drm_gem_vunmap(obj, vaddr);
+       drm_gem_vunmap(obj, map->vaddr);
  }
  EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
  
@@@ -794,7 -796,6 +797,7 @@@ static const struct dma_buf_ops drm_gem
  
  /**
   * drm_prime_pages_to_sg - converts a page array into an sg list
 + * @dev: DRM device
   * @pages: pointer to the array of page pointers to convert
   * @nr_pages: length of the page vector
   *
  struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
                                       struct page **pages, unsigned int nr_pages)
  {
 -      struct sg_table *sg = NULL;
 +      struct sg_table *sg;
 +      struct scatterlist *sge;
        size_t max_segment = 0;
 -      int ret;
  
        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 -      if (!sg) {
 -              ret = -ENOMEM;
 -              goto out;
 -      }
 +      if (!sg)
 +              return ERR_PTR(-ENOMEM);
  
        if (dev)
                max_segment = dma_max_mapping_size(dev->dev);
        if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
                max_segment = SCATTERLIST_MAX_SEGMENT;
 -      ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
 +      sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                          nr_pages << PAGE_SHIFT,
 -                                        max_segment, GFP_KERNEL);
 -      if (ret)
 -              goto out;
 -
 +                                        max_segment,
 +                                        NULL, 0, GFP_KERNEL);
 +      if (IS_ERR(sge)) {
 +              kfree(sg);
 +              sg = ERR_CAST(sge);
 +      }
        return sg;
 -out:
 -      kfree(sg);
 -      return ERR_PTR(ret);
  }
  EXPORT_SYMBOL(drm_prime_pages_to_sg);
  
  /**
 + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 + * @sgt: sg_table describing the buffer to check
 + *
 + * This helper calculates the contiguous size in the DMA address space
 + * of the buffer described by the provided sg_table.
 + *
 + * This is useful for implementing
 + * &drm_gem_object_funcs.gem_prime_import_sg_table.
 + */
 +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
 +{
 +      dma_addr_t expected = sg_dma_address(sgt->sgl);
 +      struct scatterlist *sg;
 +      unsigned long size = 0;
 +      int i;
 +
 +      for_each_sgtable_dma_sg(sgt, sg, i) {
 +              unsigned int len = sg_dma_len(sg);
 +
 +              if (!len)
 +                      break;
 +              if (sg_dma_address(sg) != expected)
 +                      break;
 +              expected += len;
 +              size += len;
 +      }
 +      return size;
 +}
 +EXPORT_SYMBOL(drm_prime_get_contiguous_size);
 +
 +/**
   * drm_gem_prime_export - helper library implementation of the export callback
   * @obj: GEM object to export
   * @flags: flags like DRM_CLOEXEC and DRM_RDWR
@@@ -996,26 -969,45 +999,26 @@@ EXPORT_SYMBOL(drm_gem_prime_import)
  int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_entries)
  {
 -      unsigned count;
 -      struct scatterlist *sg;
 -      struct page *page;
 -      u32 page_len, page_index;
 -      dma_addr_t addr;
 -      u32 dma_len, dma_index;
 -
 -      /*
 -       * Scatterlist elements contains both pages and DMA addresses, but
 -       * one shoud not assume 1:1 relation between them. The sg->length is
 -       * the size of the physical memory chunk described by the sg->page,
 -       * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
 -       * described by the sg_dma_address(sg).
 -       */
 -      page_index = 0;
 -      dma_index = 0;
 -      for_each_sg(sgt->sgl, sg, sgt->nents, count) {
 -              page_len = sg->length;
 -              page = sg_page(sg);
 -              dma_len = sg_dma_len(sg);
 -              addr = sg_dma_address(sg);
 -
 -              while (pages && page_len > 0) {
 -                      if (WARN_ON(page_index >= max_entries))
 +      struct sg_dma_page_iter dma_iter;
 +      struct sg_page_iter page_iter;
 +      struct page **p = pages;
 +      dma_addr_t *a = addrs;
 +
 +      if (pages) {
 +              for_each_sgtable_page(sgt, &page_iter, 0) {
 +                      if (WARN_ON(p - pages >= max_entries))
                                return -1;
 -                      pages[page_index] = page;
 -                      page++;
 -                      page_len -= PAGE_SIZE;
 -                      page_index++;
 +                      *p++ = sg_page_iter_page(&page_iter);
                }
 -              while (addrs && dma_len > 0) {
 -                      if (WARN_ON(dma_index >= max_entries))
 +      }
 +      if (addrs) {
 +              for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
 +                      if (WARN_ON(a - addrs >= max_entries))
                                return -1;
 -                      addrs[dma_index] = addr;
 -                      addr += PAGE_SIZE;
 -                      dma_len -= PAGE_SIZE;
 -                      dma_index++;
 +                      *a++ = sg_page_iter_dma_address(&dma_iter);
                }
        }
 +
        return 0;
  }
  EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
@@@ -27,7 -27,7 +27,7 @@@ static void etnaviv_gem_scatter_map(str
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 -              dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@@ -51,7 -51,7 +51,7 @@@
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
 -              dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
 +              dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* called with etnaviv_obj->lock held */
@@@ -171,7 -171,7 +171,7 @@@ int etnaviv_gem_mmap(struct file *filp
        return obj->ops->mmap(obj, vma);
  }
  
- vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
  {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@@ -405,8 -405,9 +405,8 @@@ int etnaviv_gem_cpu_prep(struct drm_gem
        }
  
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 -              dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
 -                                  etnaviv_obj->sgt->nents,
 -                                  etnaviv_op_to_dma_dir(op));
 +              dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
 +                                       etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }
  
@@@ -421,7 -422,8 +421,7 @@@ int etnaviv_gem_cpu_fini(struct drm_gem
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
 -              dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
 -                      etnaviv_obj->sgt->nents,
 +              dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }
@@@ -559,6 -561,22 +559,22 @@@ void etnaviv_gem_obj_add(struct drm_dev
        mutex_unlock(&priv->gem_lock);
  }
  
+ static const struct vm_operations_struct vm_ops = {
+       .fault = etnaviv_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+ };
+ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
+       .free = etnaviv_gem_free_object,
+       .pin = etnaviv_gem_prime_pin,
+       .unpin = etnaviv_gem_prime_unpin,
+       .get_sg_table = etnaviv_gem_prime_get_sg_table,
+       .vmap = etnaviv_gem_prime_vmap,
+       .vunmap = etnaviv_gem_prime_vunmap,
+       .vm_ops = &vm_ops,
+ };
  static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
  {
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);
  
        *obj = &etnaviv_obj->base;
+       (*obj)->funcs = &etnaviv_gem_object_funcs;
  
        return 0;
  }
@@@ -42,6 -42,8 +42,6 @@@ static int exynos_drm_alloc_buf(struct 
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
                attr |= DMA_ATTR_WRITE_COMBINE;
 -      else
 -              attr |= DMA_ATTR_NON_CONSISTENT;
  
        /* FBDev emulation requires kernel mapping */
        if (!kvmap)
@@@ -127,6 -129,19 +127,19 @@@ void exynos_drm_gem_destroy(struct exyn
        kfree(exynos_gem);
  }
  
+ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+ };
+ static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
+       .free = exynos_drm_gem_free_object,
+       .get_sg_table = exynos_drm_gem_prime_get_sg_table,
+       .vmap = exynos_drm_gem_prime_vmap,
+       .vunmap = exynos_drm_gem_prime_vunmap,
+       .vm_ops = &exynos_drm_gem_vm_ops,
+ };
  static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                                                  unsigned long size)
  {
        exynos_gem->size = size;
        obj = &exynos_gem->base;
  
+       obj->funcs = &exynos_drm_gem_object_funcs;
        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
@@@ -429,10 -446,27 +444,10 @@@ exynos_drm_gem_prime_import_sg_table(st
  {
        struct exynos_drm_gem *exynos_gem;
  
 -      if (sgt->nents < 1)
 +      /* check if the entries in the sg_table are contiguous */
 +      if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
 +              DRM_ERROR("buffer chunks must be mapped contiguously");
                return ERR_PTR(-EINVAL);
 -
 -      /*
 -       * Check if the provided buffer has been mapped as contiguous
 -       * into DMA address space.
 -       */
 -      if (sgt->nents > 1) {
 -              dma_addr_t next_addr = sg_dma_address(sgt->sgl);
 -              struct scatterlist *s;
 -              unsigned int i;
 -
 -              for_each_sg(sgt->sgl, s, sgt->nents, i) {
 -                      if (!sg_dma_len(s))
 -                              break;
 -                      if (sg_dma_address(s) != next_addr) {
 -                              DRM_ERROR("buffer chunks must be mapped contiguously");
 -                              return ERR_PTR(-EINVAL);
 -                      }
 -                      next_addr = sg_dma_address(s) + sg_dma_len(s);
 -              }
        }
  
        exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
@@@ -48,9 -48,12 +48,9 @@@ static struct sg_table *i915_gem_map_dm
                src = sg_next(src);
        }
  
 -      if (!dma_map_sg_attrs(attachment->dev,
 -                            st->sgl, st->nents, dir,
 -                            DMA_ATTR_SKIP_CPU_SYNC)) {
 -              ret = -ENOMEM;
 +      ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
 +      if (ret)
                goto err_free_sg;
 -      }
  
        return st;
  
@@@ -70,21 -73,30 +70,28 @@@ static void i915_gem_unmap_dma_buf(stru
  {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
  
 -      dma_unmap_sg_attrs(attachment->dev,
 -                         sg->sgl, sg->nents, dir,
 -                         DMA_ATTR_SKIP_CPU_SYNC);
 +      dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
  
        i915_gem_object_unpin_pages(obj);
  }
  
- static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       void *vaddr;
  
-       return i915_gem_object_pin_map(obj, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+       dma_buf_map_set_vaddr(map, vaddr);
+       return 0;
  }
  
- static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
  
@@@ -28,9 -28,10 +28,9 @@@ static struct sg_table *mock_map_dma_bu
                sg = sg_next(sg);
        }
  
 -      if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
 -              err = -ENOMEM;
 +      err = dma_map_sgtable(attachment->dev, st, dir, 0);
 +      if (err)
                goto err_st;
 -      }
  
        return st;
  
@@@ -45,7 -46,7 +45,7 @@@ static void mock_unmap_dma_buf(struct d
                               struct sg_table *st,
                               enum dma_data_direction dir)
  {
 -      dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
 +      dma_unmap_sgtable(attachment->dev, st, dir, 0);
        sg_free_table(st);
        kfree(st);
  }
@@@ -61,18 -62,24 +61,24 @@@ static void mock_dmabuf_release(struct 
        kfree(mock);
  }
  
- static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+ static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct mock_dmabuf *mock = to_mock(dma_buf);
+       void *vaddr;
  
-       return vm_map_ram(mock->pages, mock->npages, 0);
+       vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+       if (!vaddr)
+               return -ENOMEM;
+       dma_buf_map_set_vaddr(map, vaddr);
+       return 0;
  }
  
- static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
  {
        struct mock_dmabuf *mock = to_mock(dma_buf);
  
-       vm_unmap_ram(vaddr, mock->npages);
+       vm_unmap_ram(map->vaddr, mock->npages);
  }
  
  static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
@@@ -58,6 -58,7 +58,6 @@@
  #include "display/intel_hotplug.h"
  #include "display/intel_overlay.h"
  #include "display/intel_pipe_crc.h"
 -#include "display/intel_psr.h"
  #include "display/intel_sprite.h"
  #include "display/intel_vga.h"
  
@@@ -215,6 -216,125 +215,6 @@@ intel_teardown_mchbar(struct drm_i915_p
                release_resource(&dev_priv->mch_res);
  }
  
 -/* part #1: call before irq install */
 -static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
 -{
 -      int ret;
 -
 -      if (i915_inject_probe_failure(i915))
 -              return -ENODEV;
 -
 -      if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
 -              ret = drm_vblank_init(&i915->drm,
 -                                    INTEL_NUM_PIPES(i915));
 -              if (ret)
 -                      return ret;
 -      }
 -
 -      intel_bios_init(i915);
 -
 -      ret = intel_vga_register(i915);
 -      if (ret)
 -              goto cleanup_bios;
 -
 -      intel_power_domains_init_hw(i915, false);
 -
 -      intel_csr_ucode_init(i915);
 -
 -      ret = intel_modeset_init_noirq(i915);
 -      if (ret)
 -              goto cleanup_vga_client_pw_domain_csr;
 -
 -      return 0;
 -
 -cleanup_vga_client_pw_domain_csr:
 -      intel_csr_ucode_fini(i915);
 -      intel_power_domains_driver_remove(i915);
 -      intel_vga_unregister(i915);
 -cleanup_bios:
 -      intel_bios_driver_remove(i915);
 -      return ret;
 -}
 -
 -/* part #2: call after irq install */
 -static int i915_driver_modeset_probe(struct drm_i915_private *i915)
 -{
 -      int ret;
 -
 -      /* Important: The output setup functions called by modeset_init need
 -       * working irqs for e.g. gmbus and dp aux transfers. */
 -      ret = intel_modeset_init(i915);
 -      if (ret)
 -              goto out;
 -
 -      ret = i915_gem_init(i915);
 -      if (ret)
 -              goto cleanup_modeset;
 -
 -      intel_overlay_setup(i915);
 -
 -      if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
 -              return 0;
 -
 -      ret = intel_fbdev_init(&i915->drm);
 -      if (ret)
 -              goto cleanup_gem;
 -
 -      /* Only enable hotplug handling once the fbdev is fully set up. */
 -      intel_hpd_init(i915);
 -
 -      intel_init_ipc(i915);
 -
 -      intel_psr_set_force_mode_changed(i915->psr.dp);
 -
 -      return 0;
 -
 -cleanup_gem:
 -      i915_gem_suspend(i915);
 -      i915_gem_driver_remove(i915);
 -      i915_gem_driver_release(i915);
 -cleanup_modeset:
 -      /* FIXME */
 -      intel_modeset_driver_remove(i915);
 -      intel_irq_uninstall(i915);
 -      intel_modeset_driver_remove_noirq(i915);
 -out:
 -      return ret;
 -}
 -
 -/* part #1: call before irq uninstall */
 -static void i915_driver_modeset_remove(struct drm_i915_private *i915)
 -{
 -      intel_modeset_driver_remove(i915);
 -}
 -
 -/* part #2: call after irq uninstall */
 -static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
 -{
 -      intel_csr_ucode_fini(i915);
 -
 -      intel_power_domains_driver_remove(i915);
 -
 -      intel_vga_unregister(i915);
 -
 -      intel_bios_driver_remove(i915);
 -}
 -
 -static void intel_init_dpio(struct drm_i915_private *dev_priv)
 -{
 -      /*
 -       * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
 -       * CHV x1 PHY (DP/HDMI D)
 -       * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
 -       */
 -      if (IS_CHERRYVIEW(dev_priv)) {
 -              DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
 -              DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
 -      } else if (IS_VALLEYVIEW(dev_priv)) {
 -              DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
 -      }
 -}
 -
  static int i915_workqueues_init(struct drm_i915_private *dev_priv)
  {
        /*
@@@ -343,6 -463,7 +343,6 @@@ static int i915_driver_early_probe(stru
        intel_detect_pch(dev_priv);
  
        intel_pm_setup(dev_priv);
 -      intel_init_dpio(dev_priv);
        ret = intel_power_domains_init(dev_priv);
        if (ret < 0)
                goto err_gem;
@@@ -677,7 -798,7 +677,7 @@@ static void i915_driver_register(struc
                drm_err(&dev_priv->drm,
                        "Failed to register driver for userspace access!\n");
  
 -      if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
 +      if (HAS_DISPLAY(dev_priv)) {
                /* Must be done after probing outputs */
                intel_opregion_register(dev_priv);
                acpi_video_register();
         * We need to coordinate the hotplugs with the asynchronous fbdev
         * configuration, for which we use the fbdev->async_cookie.
         */
 -      if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
 +      if (HAS_DISPLAY(dev_priv))
                drm_kms_helper_poll_init(dev);
  
        intel_power_domains_enable(dev_priv);
@@@ -867,7 -988,7 +867,7 @@@ int i915_driver_probe(struct pci_dev *p
        if (ret < 0)
                goto out_cleanup_mmio;
  
 -      ret = i915_driver_modeset_probe_noirq(i915);
 +      ret = intel_modeset_init_noirq(i915);
        if (ret < 0)
                goto out_cleanup_hw;
  
        if (ret)
                goto out_cleanup_modeset;
  
 -      ret = i915_driver_modeset_probe(i915);
 -      if (ret < 0)
 +      ret = intel_modeset_init_nogem(i915);
 +      if (ret)
                goto out_cleanup_irq;
  
 +      ret = i915_gem_init(i915);
 +      if (ret)
 +              goto out_cleanup_modeset2;
 +
 +      ret = intel_modeset_init(i915);
 +      if (ret)
 +              goto out_cleanup_gem;
 +
        i915_driver_register(i915);
  
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
  
        return 0;
  
 +out_cleanup_gem:
 +      i915_gem_suspend(i915);
 +      i915_gem_driver_remove(i915);
 +      i915_gem_driver_release(i915);
 +out_cleanup_modeset2:
 +      /* FIXME clean up the error path */
 +      intel_modeset_driver_remove(i915);
 +      intel_irq_uninstall(i915);
 +      intel_modeset_driver_remove_noirq(i915);
 +      goto out_cleanup_modeset;
  out_cleanup_irq:
        intel_irq_uninstall(i915);
  out_cleanup_modeset:
 -      i915_driver_modeset_remove_noirq(i915);
 +      intel_modeset_driver_remove_nogem(i915);
  out_cleanup_hw:
        i915_driver_hw_remove(i915);
        intel_memory_regions_driver_release(i915);
@@@ -942,7 -1045,7 +942,7 @@@ void i915_driver_remove(struct drm_i915
  
        intel_gvt_driver_remove(i915);
  
 -      i915_driver_modeset_remove(i915);
 +      intel_modeset_driver_remove(i915);
  
        intel_irq_uninstall(i915);
  
        i915_reset_error_state(i915);
        i915_gem_driver_remove(i915);
  
 -      i915_driver_modeset_remove_noirq(i915);
 +      intel_modeset_driver_remove_nogem(i915);
  
        i915_driver_hw_remove(i915);
  
@@@ -1750,12 -1853,8 +1750,8 @@@ static struct drm_driver driver = 
        .lastclose = i915_driver_lastclose,
        .postclose = i915_driver_postclose,
  
-       .gem_close_object = i915_gem_close_object,
-       .gem_free_object_unlocked = i915_gem_free_object,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = i915_gem_prime_export,
        .gem_prime_import = i915_gem_prime_import,
  
        .dumb_create = i915_gem_dumb_create,
@@@ -85,9 -85,6 +85,6 @@@ static struct drm_driver mock_driver = 
        .name = "mock",
        .driver_features = DRIVER_GEM,
        .release = mock_device_release,
-       .gem_close_object = i915_gem_close_object,
-       .gem_free_object_unlocked = i915_gem_free_object,
  };
  
  static void release_dev(struct device *dev)
@@@ -116,11 -113,11 +113,11 @@@ static struct dev_pm_domain pm_domain 
  
  struct drm_i915_private *mock_gem_device(void)
  {
 -      struct drm_i915_private *i915;
 -      struct pci_dev *pdev;
  #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
 -      struct dev_iommu iommu;
 +      static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
  #endif
 +      struct drm_i915_private *i915;
 +      struct pci_dev *pdev;
  
        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  
  #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
 -      /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
 -      memset(&iommu, 0, sizeof(iommu));
 -      iommu.priv = (void *)-1;
 -      pdev->dev.iommu = &iommu;
 +      /* HACK to disable iommu for the fake device; force identity mapping */
 +      pdev->dev.iommu = &fake_iommu;
  #endif
        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
                put_device(&pdev->dev);
@@@ -481,7 -481,7 +481,7 @@@ static void mtk_drm_crtc_hw_config(stru
                mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
                cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
                cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 -              cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
 +              cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
                mtk_crtc_ddp_config(crtc, cmdq_handle);
                cmdq_pkt_finalize(cmdq_handle);
                cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
@@@ -517,7 -517,7 +517,7 @@@ void mtk_drm_crtc_async_update(struct d
  }
  
  static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
-                                      struct drm_crtc_state *old_state)
+                                      struct drm_atomic_state *state)
  {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
  }
  
  static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
-                                       struct drm_crtc_state *old_state)
+                                       struct drm_atomic_state *state)
  {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
@@@ -831,19 -831,13 +831,19 @@@ int mtk_drm_crtc_create(struct drm_devi
                        drm_crtc_index(&mtk_crtc->base));
                mtk_crtc->cmdq_client = NULL;
        }
 -      ret = of_property_read_u32_index(priv->mutex_node,
 -                                       "mediatek,gce-events",
 -                                       drm_crtc_index(&mtk_crtc->base),
 -                                       &mtk_crtc->cmdq_event);
 -      if (ret)
 -              dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 -                      drm_crtc_index(&mtk_crtc->base));
 +
 +      if (mtk_crtc->cmdq_client) {
 +              ret = of_property_read_u32_index(priv->mutex_node,
 +                                               "mediatek,gce-events",
 +                                               drm_crtc_index(&mtk_crtc->base),
 +                                               &mtk_crtc->cmdq_event);
 +              if (ret) {
 +                      dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 +                              drm_crtc_index(&mtk_crtc->base));
 +                      cmdq_mbox_destroy(mtk_crtc->cmdq_client);
 +                      mtk_crtc->cmdq_client = NULL;
 +              }
 +      }
  #endif
        return 0;
  }
@@@ -27,6 -27,7 +27,6 @@@
  
  #include "mtk_drm_crtc.h"
  #include "mtk_drm_ddp.h"
 -#include "mtk_drm_ddp.h"
  #include "mtk_drm_ddp_comp.h"
  #include "mtk_drm_drv.h"
  #include "mtk_drm_gem.h"
@@@ -73,19 -74,6 +73,19 @@@ static const enum mtk_ddp_comp_id mt270
        DDP_COMPONENT_DPI0,
  };
  
 +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = {
 +      DDP_COMPONENT_OVL0,
 +      DDP_COMPONENT_RDMA0,
 +      DDP_COMPONENT_COLOR0,
 +      DDP_COMPONENT_BLS,
 +      DDP_COMPONENT_DPI0,
 +};
 +
 +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = {
 +      DDP_COMPONENT_RDMA1,
 +      DDP_COMPONENT_DSI0,
 +};
 +
  static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
        DDP_COMPONENT_OVL0,
        DDP_COMPONENT_COLOR0,
@@@ -139,14 -127,6 +139,14 @@@ static const struct mtk_mmsys_driver_da
        .shadow_register = true,
  };
  
 +static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
 +      .main_path = mt7623_mtk_ddp_main,
 +      .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
 +      .ext_path = mt7623_mtk_ddp_ext,
 +      .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext),
 +      .shadow_register = true,
 +};
 +
  static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
        .main_path = mt2712_mtk_ddp_main,
        .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
@@@ -185,7 -165,7 +185,7 @@@ static int mtk_drm_kms_init(struct drm_
  
        ret = drmm_mode_config_init(drm);
        if (ret)
 -              return ret;
 +              goto put_mutex_dev;
  
        drm->mode_config.min_width = 64;
        drm->mode_config.min_height = 64;
  
        ret = component_bind_all(drm->dev, drm);
        if (ret)
 -              return ret;
 +              goto put_mutex_dev;
  
        /*
         * We currently support two fixed data streams, each optional,
        }
        if (!dma_dev->dma_parms) {
                ret = -ENOMEM;
 -              goto err_component_unbind;
 +              goto put_dma_dev;
        }
  
        ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
  err_unset_dma_parms:
        if (private->dma_parms_allocated)
                dma_dev->dma_parms = NULL;
 +put_dma_dev:
 +      put_device(private->dma_dev);
  err_component_unbind:
        component_unbind_all(drm->dev, drm);
 -
 +put_mutex_dev:
 +      put_device(private->mutex_dev);
        return ret;
  }
  
@@@ -324,18 -301,13 +324,13 @@@ struct drm_gem_object *mtk_drm_gem_prim
  static struct drm_driver mtk_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
  
-       .gem_free_object_unlocked = mtk_drm_gem_free_object,
-       .gem_vm_ops = &drm_gem_cma_vm_ops,
        .dumb_create = mtk_drm_gem_dumb_create,
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = mtk_drm_gem_prime_import,
-       .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
-       .gem_prime_vmap = mtk_drm_gem_prime_vmap,
-       .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
        .fops = &mtk_drm_fops,
  
        .name = DRIVER_NAME,
@@@ -445,8 -417,6 +440,8 @@@ static const struct of_device_id mtk_dd
  static const struct of_device_id mtk_drm_of_ids[] = {
        { .compatible = "mediatek,mt2701-mmsys",
          .data = &mt2701_mmsys_driver_data},
 +      { .compatible = "mediatek,mt7623-mmsys",
 +        .data = &mt7623_mmsys_driver_data},
        { .compatible = "mediatek,mt2712-mmsys",
          .data = &mt2712_mmsys_driver_data},
        { .compatible = "mediatek,mt8173-mmsys",
@@@ -569,13 -539,8 +564,13 @@@ err_pm
        pm_runtime_disable(dev);
  err_node:
        of_node_put(private->mutex_node);
 -      for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
 +      for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
                of_node_put(private->comp_node[i]);
 +              if (private->ddp_comp[i]) {
 +                      put_device(private->ddp_comp[i]->larb_dev);
 +                      private->ddp_comp[i] = NULL;
 +              }
 +      }
        return ret;
  }
  
@@@ -8,11 -8,20 +8,20 @@@
  #include <drm/drm.h>
  #include <drm/drm_device.h>
  #include <drm/drm_gem.h>
+ #include <drm/drm_gem_cma_helper.h>
  #include <drm/drm_prime.h>
  
  #include "mtk_drm_drv.h"
  #include "mtk_drm_gem.h"
  
+ static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
+       .free = mtk_drm_gem_free_object,
+       .get_sg_table = mtk_gem_prime_get_sg_table,
+       .vmap = mtk_drm_gem_prime_vmap,
+       .vunmap = mtk_drm_gem_prime_vunmap,
+       .vm_ops = &drm_gem_cma_vm_ops,
+ };
  static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
  {
@@@ -25,6 -34,8 +34,8 @@@
        if (!mtk_gem_obj)
                return ERR_PTR(-ENOMEM);
  
+       mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
        ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
@@@ -212,28 -223,46 +223,28 @@@ struct drm_gem_object *mtk_gem_prime_im
                        struct dma_buf_attachment *attach, struct sg_table *sg)
  {
        struct mtk_drm_gem_obj *mtk_gem;
 -      int ret;
 -      struct scatterlist *s;
 -      unsigned int i;
 -      dma_addr_t expected;
  
 -      mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
 +      /* check if the entries in the sg_table are contiguous */
 +      if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
 +              DRM_ERROR("sg_table is not contiguous");
 +              return ERR_PTR(-EINVAL);
 +      }
  
 +      mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(mtk_gem))
                return ERR_CAST(mtk_gem);
  
 -      expected = sg_dma_address(sg->sgl);
 -      for_each_sg(sg->sgl, s, sg->nents, i) {
 -              if (!sg_dma_len(s))
 -                      break;
 -
 -              if (sg_dma_address(s) != expected) {
 -                      DRM_ERROR("sg_table is not contiguous");
 -                      ret = -EINVAL;
 -                      goto err_gem_free;
 -              }
 -              expected = sg_dma_address(s) + sg_dma_len(s);
 -      }
 -
        mtk_gem->dma_addr = sg_dma_address(sg->sgl);
        mtk_gem->sg = sg;
  
        return &mtk_gem->base;
 -
 -err_gem_free:
 -      kfree(mtk_gem);
 -      return ERR_PTR(ret);
  }
  
  void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
  {
        struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
        struct sg_table *sgt;
 -      struct sg_page_iter iter;
        unsigned int npages;
 -      unsigned int i = 0;
  
        if (mtk_gem->kvaddr)
                return mtk_gem->kvaddr;
        if (!mtk_gem->pages)
                goto out;
  
 -      for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
 -              mtk_gem->pages[i++] = sg_page_iter_page(&iter);
 -              if (i > npages)
 -                      break;
 -      }
 +      drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
 +
        mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
                               pgprot_writecombine(PAGE_KERNEL));
  
@@@ -11,6 -11,7 +11,7 @@@
  #include <linux/ktime.h>
  #include <linux/bits.h>
  
+ #include <drm/drm_atomic.h>
  #include <drm/drm_crtc.h>
  #include <drm/drm_flip_work.h>
  #include <drm/drm_mode.h>
@@@ -265,6 -266,11 +266,6 @@@ enum dpu_intf_mode dpu_crtc_get_intf_mo
  {
        struct drm_encoder *encoder;
  
 -      if (!crtc) {
 -              DPU_ERROR("invalid crtc\n");
 -              return INTF_MODE_NONE;
 -      }
 -
        /*
         * TODO: This function is called from dpu debugfs and as part of atomic
         * check. When called from debugfs, the crtc->mutex must be held to
@@@ -292,6 -298,7 +293,6 @@@ void dpu_crtc_vblank_callback(struct dr
                dpu_crtc->vblank_cb_time = ktime_get();
        else
                dpu_crtc->vblank_cb_count++;
 -      _dpu_crtc_complete_flip(crtc);
        drm_crtc_handle_vblank(crtc);
        trace_dpu_crtc_vblank_cb(DRMID(crtc));
  }
@@@ -396,7 -403,6 +397,7 @@@ static void dpu_crtc_frame_event_cb(voi
  void dpu_crtc_complete_commit(struct drm_crtc *crtc)
  {
        trace_dpu_crtc_complete_commit(DRMID(crtc));
 +      _dpu_crtc_complete_flip(crtc);
  }
  
  static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
  
                trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
        }
 -
 -      drm_mode_debug_printmodeline(adj_mode);
  }
  
  static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@@ -450,6 -458,7 +451,6 @@@ static void _dpu_crtc_setup_cp_blocks(s
        struct dpu_crtc_mixer *mixer = cstate->mixers;
        struct dpu_hw_pcc_cfg cfg;
        struct dpu_hw_ctl *ctl;
 -      struct dpu_hw_mixer *lm;
        struct dpu_hw_dspp *dspp;
        int i;
  
  
        for (i = 0; i < cstate->num_mixers; i++) {
                ctl = mixer[i].lm_ctl;
 -              lm = mixer[i].hw_lm;
                dspp = mixer[i].hw_dspp;
  
                if (!dspp || !dspp->ops.setup_pcc)
  static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
  {
 -      struct dpu_crtc *dpu_crtc;
 -      struct dpu_crtc_state *cstate;
 +      struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
 -      struct drm_device *dev;
 -      unsigned long flags;
 -
 -      if (!crtc) {
 -              DPU_ERROR("invalid crtc\n");
 -              return;
 -      }
  
        if (!crtc->state->enable) {
                DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
  
        DPU_DEBUG("crtc%d\n", crtc->base.id);
  
 -      dpu_crtc = to_dpu_crtc(crtc);
 -      cstate = to_dpu_crtc_state(crtc->state);
 -      dev = crtc->dev;
 -
        _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
  
 -      if (dpu_crtc->event) {
 -              WARN_ON(dpu_crtc->event);
 -      } else {
 -              spin_lock_irqsave(&dev->event_lock, flags);
 -              dpu_crtc->event = crtc->state->event;
 -              crtc->state->event = NULL;
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 -      }
 -
        /* encoder will trigger pending mask now */
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_trigger_kickoff_pending(encoder);
@@@ -553,11 -584,14 +554,11 @@@ static void dpu_crtc_atomic_flush(struc
                return;
        }
  
 -      if (dpu_crtc->event) {
 -              DPU_DEBUG("already received dpu_crtc->event\n");
 -      } else {
 -              spin_lock_irqsave(&dev->event_lock, flags);
 -              dpu_crtc->event = crtc->state->event;
 -              crtc->state->event = NULL;
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 -      }
 +      WARN_ON(dpu_crtc->event);
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      dpu_crtc->event = crtc->state->event;
 +      crtc->state->event = NULL;
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
        /*
         * If no mixers has been allocated in dpu_crtc_atomic_check(),
  static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
  {
 -      struct dpu_crtc_state *cstate;
 -
 -      if (!crtc || !state) {
 -              DPU_ERROR("invalid argument(s)\n");
 -              return;
 -      }
 -
 -      cstate = to_dpu_crtc_state(state);
 +      struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
  
        DPU_DEBUG("crtc%d\n", crtc->base.id);
  
@@@ -691,8 -732,14 +692,8 @@@ static void dpu_crtc_reset(struct drm_c
   */
  static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
  {
 -      struct dpu_crtc_state *cstate, *old_cstate;
 +      struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
  
 -      if (!crtc || !crtc->state) {
 -              DPU_ERROR("invalid argument(s)\n");
 -              return NULL;
 -      }
 -
 -      old_cstate = to_dpu_crtc_state(crtc->state);
        cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
        if (!cstate) {
                DPU_ERROR("failed to allocate state\n");
  }
  
  static void dpu_crtc_disable(struct drm_crtc *crtc,
-                            struct drm_crtc_state *old_crtc_state)
+                            struct drm_atomic_state *state)
  {
 -      struct dpu_crtc *dpu_crtc;
 -      struct dpu_crtc_state *cstate;
+       struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+                                                                             crtc);
 +      struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 +      struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
        unsigned long flags;
        bool release_bandwidth = false;
  
 -      if (!crtc || !crtc->state) {
 -              DPU_ERROR("invalid crtc\n");
 -              return;
 -      }
 -      dpu_crtc = to_dpu_crtc(crtc);
 -      cstate = to_dpu_crtc_state(crtc->state);
 -
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
  
        /* Disable/save vblank irq handling */
  }
  
  static void dpu_crtc_enable(struct drm_crtc *crtc,
-               struct drm_crtc_state *old_crtc_state)
+               struct drm_atomic_state *state)
  {
 -      struct dpu_crtc *dpu_crtc;
 +      struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct drm_encoder *encoder;
        bool request_bandwidth = false;
  
 -      if (!crtc) {
 -              DPU_ERROR("invalid crtc\n");
 -              return;
 -      }
 -
        pm_runtime_get_sync(crtc->dev->dev);
  
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 -      dpu_crtc = to_dpu_crtc(crtc);
  
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
                /* in video mode, we hold an extra bandwidth reference
@@@ -814,15 -876,15 +817,15 @@@ struct plane_state 
  static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
  {
 -      struct dpu_crtc *dpu_crtc;
 +      struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 +      struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
        struct plane_state *pstates;
 -      struct dpu_crtc_state *cstate;
  
        const struct drm_plane_state *pstate;
        struct drm_plane *plane;
        struct drm_display_mode *mode;
  
 -      int cnt = 0, rc = 0, mixer_width, i, z_pos;
 +      int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
  
        struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
        int multirect_count = 0;
        int left_zpos_cnt = 0, right_zpos_cnt = 0;
        struct drm_rect crtc_rect = { 0 };
  
 -      if (!crtc) {
 -              DPU_ERROR("invalid crtc\n");
 -              return -EINVAL;
 -      }
 -
        pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
  
 -      dpu_crtc = to_dpu_crtc(crtc);
 -      cstate = to_dpu_crtc_state(state);
 -
        if (!state->enable || !state->active) {
                DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
                                crtc->base.id, state->enable, state->active);
  
        memset(pipe_staged, 0, sizeof(pipe_staged));
  
 -      mixer_width = mode->hdisplay / cstate->num_mixers;
 +      if (cstate->num_mixers) {
 +              mixer_width = mode->hdisplay / cstate->num_mixers;
  
 -      _dpu_crtc_setup_lm_bounds(crtc, state);
 +              _dpu_crtc_setup_lm_bounds(crtc, state);
 +      }
  
        crtc_rect.x2 = mode->hdisplay;
        crtc_rect.y2 = mode->vdisplay;
@@@ -1177,7 -1245,23 +1180,7 @@@ static int _dpu_debugfs_status_show(str
        return 0;
  }
  
 -static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
 -{
 -      return single_open(file, _dpu_debugfs_status_show, inode->i_private);
 -}
 -
 -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
 -static int __prefix ## _open(struct inode *inode, struct file *file)  \
 -{                                                                     \
 -      return single_open(file, __prefix ## _show, inode->i_private);  \
 -}                                                                     \
 -static const struct file_operations __prefix ## _fops = {             \
 -      .owner = THIS_MODULE,                                           \
 -      .open = __prefix ## _open,                                      \
 -      .release = single_release,                                      \
 -      .read = seq_read,                                               \
 -      .llseek = seq_lseek,                                            \
 -}
 +DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
  
  static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
  {
  
        return 0;
  }
 -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
 +DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
  
  static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
  {
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  
 -      static const struct file_operations debugfs_status_fops = {
 -              .open =         _dpu_debugfs_status_open,
 -              .read =         seq_read,
 -              .llseek =       seq_lseek,
 -              .release =      single_release,
 -      };
 -
        dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
                        crtc->dev->primary->debugfs_root);
  
        debugfs_create_file("status", 0400,
                        dpu_crtc->debugfs_root,
 -                      dpu_crtc, &debugfs_status_fops);
 +                      dpu_crtc, &_dpu_debugfs_status_fops);
        debugfs_create_file("state", 0600,
                        dpu_crtc->debugfs_root,
                        &dpu_crtc->base,
@@@ -453,7 -453,15 +453,7 @@@ static int msm_drm_init(struct device *
        if (ret)
                goto err_msm_uninit;
  
 -      if (!dev->dma_parms) {
 -              dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 -                                            GFP_KERNEL);
 -              if (!dev->dma_parms) {
 -                      ret = -ENOMEM;
 -                      goto err_msm_uninit;
 -              }
 -      }
 -      dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
 +      dma_set_max_seg_size(dev, UINT_MAX);
  
        msm_gem_shrinker_init(ddev);
  
@@@ -586,10 -594,9 +586,10 @@@ static int context_init(struct drm_devi
        if (!ctx)
                return -ENOMEM;
  
 +      kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
  
 -      ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
 +      ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
  
        return 0;
@@@ -608,7 -615,7 +608,7 @@@ static int msm_open(struct drm_device *
  static void context_close(struct msm_file_private *ctx)
  {
        msm_submitqueue_close(ctx);
 -      kfree(ctx);
 +      msm_file_private_put(ctx);
  }
  
  static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@@ -772,19 -779,18 +772,19 @@@ static int msm_ioctl_gem_cpu_fini(struc
  }
  
  static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 -              struct drm_gem_object *obj, uint64_t *iova)
 +              struct drm_file *file, struct drm_gem_object *obj,
 +              uint64_t *iova)
  {
 -      struct msm_drm_private *priv = dev->dev_private;
 +      struct msm_file_private *ctx = file->driver_priv;
  
 -      if (!priv->gpu)
 +      if (!ctx->aspace)
                return -EINVAL;
  
        /*
         * Don't pin the memory here - just get an address so that userspace can
         * be productive
         */
 -      return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 +      return msm_gem_get_iova(obj, ctx->aspace, iova);
  }
  
  static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                args->value = msm_gem_mmap_offset(obj);
                break;
        case MSM_INFO_GET_IOVA:
 -              ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
 +              ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
                break;
        case MSM_INFO_SET_NAME:
                /* length check should leave room for terminating null: */
@@@ -972,12 -978,6 +972,6 @@@ static const struct drm_ioctl_desc msm_
        DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
  };
  
- static const struct vm_operations_struct vm_ops = {
-       .fault = msm_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
- };
  static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
@@@ -1003,18 -1003,11 +997,11 @@@ static struct drm_driver msm_driver = 
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
-       .gem_free_object_unlocked = msm_gem_free_object,
-       .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_pin      = msm_gem_prime_pin,
-       .gem_prime_unpin    = msm_gem_prime_unpin,
-       .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-       .gem_prime_vmap     = msm_gem_prime_vmap,
-       .gem_prime_vunmap   = msm_gem_prime_vunmap,
        .gem_prime_mmap     = msm_gem_prime_mmap,
  #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
@@@ -1352,7 -1345,6 +1339,7 @@@ static int __init msm_drm_register(void
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
 +      msm_dp_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
  }
@@@ -1361,7 -1353,6 +1348,7 @@@ static void __exit msm_drm_unregister(v
  {
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
 +      msm_dp_unregister();
        msm_hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
@@@ -57,7 -57,6 +57,7 @@@ struct msm_file_private 
        struct list_head submitqueues;
        int queueid;
        struct msm_gem_address_space *aspace;
 +      struct kref ref;
  };
  
  enum msm_mdp_plane_property {
@@@ -160,8 -159,6 +160,8 @@@ struct msm_drm_private 
        /* DSI is shared by mdp4 and mdp5 */
        struct msm_dsi *dsi[2];
  
 +      struct msm_dp *dp;
 +
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
        struct msm_file_private *lastctx;
@@@ -251,10 -248,6 +251,10 @@@ int msm_gem_map_vma(struct msm_gem_addr
  void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma);
  
 +
 +struct msm_gem_address_space *
 +msm_gem_address_space_get(struct msm_gem_address_space *aspace);
 +
  void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
  
  struct msm_gem_address_space *
@@@ -276,7 -269,6 +276,6 @@@ void msm_gem_shrinker_cleanup(struct dr
  int msm_gem_mmap_obj(struct drm_gem_object *obj,
                        struct vm_area_struct *vma);
  int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
- vm_fault_t msm_gem_fault(struct vm_fault *vmf);
  uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
  int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
@@@ -309,8 -301,9 +308,8 @@@ void msm_gem_put_vaddr(struct drm_gem_o
  int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
  int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive);
 -void msm_gem_move_to_active(struct drm_gem_object *obj,
 -              struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
 -void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
 +void msm_gem_active_put(struct drm_gem_object *obj);
  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
  int msm_gem_cpu_fini(struct drm_gem_object *obj);
  void msm_gem_free_object(struct drm_gem_object *obj);
@@@ -384,63 -377,6 +383,63 @@@ static inline int msm_dsi_modeset_init(
  }
  #endif
  
 +#ifdef CONFIG_DRM_MSM_DP
 +int __init msm_dp_register(void);
 +void __exit msm_dp_unregister(void);
 +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
 +                       struct drm_encoder *encoder);
 +int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
 +int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
 +int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
 +void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
 +                              struct drm_display_mode *mode,
 +                              struct drm_display_mode *adjusted_mode);
 +void msm_dp_irq_postinstall(struct msm_dp *dp_display);
 +
 +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
 +
 +#else
 +static inline int __init msm_dp_register(void)
 +{
 +      return -EINVAL;
 +}
 +static inline void __exit msm_dp_unregister(void)
 +{
 +}
 +static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
 +                                     struct drm_device *dev,
 +                                     struct drm_encoder *encoder)
 +{
 +      return -EINVAL;
 +}
 +static inline int msm_dp_display_enable(struct msm_dp *dp,
 +                                      struct drm_encoder *encoder)
 +{
 +      return -EINVAL;
 +}
 +static inline int msm_dp_display_disable(struct msm_dp *dp,
 +                                      struct drm_encoder *encoder)
 +{
 +      return -EINVAL;
 +}
 +static inline void msm_dp_display_mode_set(struct msm_dp *dp,
 +                              struct drm_encoder *encoder,
 +                              struct drm_display_mode *mode,
 +                              struct drm_display_mode *adjusted_mode)
 +{
 +}
 +
 +static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
 +{
 +}
 +
 +static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
 +              struct drm_minor *minor)
 +{
 +}
 +
 +#endif
 +
  void __init msm_mdp_register(void);
  void __exit msm_mdp_unregister(void);
  void __init msm_dpu_register(void);
@@@ -461,9 -397,8 +460,9 @@@ void msm_perf_debugfs_cleanup(struct ms
  #else
  static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
  __printf(3, 4)
 -static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 -              const char *fmt, ...) {}
 +static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
 +                      struct msm_gem_submit *submit,
 +                      const char *fmt, ...) {}
  static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
  static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
  #endif
@@@ -483,8 -418,7 +482,8 @@@ struct msm_gpu_submitqueue
  int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
  struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
                u32 id);
 -int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 +int msm_submitqueue_create(struct drm_device *drm,
 +              struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id);
  int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
                struct drm_msm_submitqueue_query *args);
@@@ -493,26 -427,6 +492,26 @@@ void msm_submitqueue_close(struct msm_f
  
  void msm_submitqueue_destroy(struct kref *kref);
  
 +static inline void __msm_file_private_destroy(struct kref *kref)
 +{
 +      struct msm_file_private *ctx = container_of(kref,
 +              struct msm_file_private, ref);
 +
 +      msm_gem_address_space_put(ctx->aspace);
 +      kfree(ctx);
 +}
 +
 +static inline void msm_file_private_put(struct msm_file_private *ctx)
 +{
 +      kref_put(&ctx->ref, __msm_file_private_destroy);
 +}
 +
 +static inline struct msm_file_private *msm_file_private_get(
 +      struct msm_file_private *ctx)
 +{
 +      kref_get(&ctx->ref);
 +      return ctx;
 +}
  
  #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
  #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
@@@ -4,7 -4,6 +4,7 @@@
   * Author: Rob Clark <robdclark@gmail.com>
   */
  
 +#include <linux/dma-map-ops.h>
  #include <linux/spinlock.h>
  #include <linux/shmem_fs.h>
  #include <linux/dma-buf.h>
@@@ -53,14 -52,26 +53,14 @@@ static void sync_for_device(struct msm_
  {
        struct device *dev = msm_obj->base.dev->dev;
  
 -      if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      } else {
 -              dma_map_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      }
 +      dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void sync_for_cpu(struct msm_gem_object *msm_obj)
  {
        struct device *dev = msm_obj->base.dev->dev;
  
 -      if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      } else {
 -              dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      }
 +      dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
@@@ -236,7 -247,7 +236,7 @@@ int msm_gem_mmap(struct file *filp, str
        return msm_gem_mmap_obj(vma->vm_private_data, vma);
  }
  
- vm_fault_t msm_gem_fault(struct vm_fault *vmf)
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
  {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@@ -742,31 -753,31 +742,31 @@@ int msm_gem_sync_object(struct drm_gem_
        return 0;
  }
  
 -void msm_gem_move_to_active(struct drm_gem_object *obj,
 -              struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
  {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 +      WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 -      msm_obj->gpu = gpu;
 -      if (exclusive)
 -              dma_resv_add_excl_fence(obj->resv, fence);
 -      else
 -              dma_resv_add_shared_fence(obj->resv, fence);
 -      list_del_init(&msm_obj->mm_list);
 -      list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 +
 +      if (!atomic_fetch_inc(&msm_obj->active_count)) {
 +              msm_obj->gpu = gpu;
 +              list_del_init(&msm_obj->mm_list);
 +              list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 +      }
  }
  
 -void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 +void msm_gem_active_put(struct drm_gem_object *obj)
  {
 -      struct drm_device *dev = obj->dev;
 -      struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 +      struct msm_drm_private *priv = obj->dev->dev_private;
  
 -      WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 +      WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
  
 -      msm_obj->gpu = NULL;
 -      list_del_init(&msm_obj->mm_list);
 -      list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 +      if (!atomic_dec_return(&msm_obj->active_count)) {
 +              msm_obj->gpu = NULL;
 +              list_del_init(&msm_obj->mm_list);
 +              list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 +      }
  }
  
  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@@ -841,28 -852,11 +841,28 @@@ void msm_gem_describe(struct drm_gem_ob
  
                seq_puts(m, "      vmas:");
  
 -              list_for_each_entry(vma, &msm_obj->vmas, list)
 -                      seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
 -                              vma->aspace != NULL ? vma->aspace->name : NULL,
 -                              vma->iova, vma->mapped ? "mapped" : "unmapped",
 +              list_for_each_entry(vma, &msm_obj->vmas, list) {
 +                      const char *name, *comm;
 +                      if (vma->aspace) {
 +                              struct msm_gem_address_space *aspace = vma->aspace;
 +                              struct task_struct *task =
 +                                      get_pid_task(aspace->pid, PIDTYPE_PID);
 +                              if (task) {
 +                                      comm = kstrdup(task->comm, GFP_KERNEL);
 +                              } else {
 +                                      comm = NULL;
 +                              }
 +                              name = aspace->name;
 +                      } else {
 +                              name = comm = NULL;
 +                      }
 +                      seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
 +                              name, comm ? ":" : "", comm ? comm : "",
 +                              vma->aspace, vma->iova,
 +                              vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
 +                      kfree(comm);
 +              }
  
                seq_puts(m, "\n");
        }
@@@ -1000,6 -994,22 +1000,22 @@@ int msm_gem_new_handle(struct drm_devic
        return ret;
  }
  
+ static const struct vm_operations_struct vm_ops = {
+       .fault = msm_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+ };
+ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+       .free = msm_gem_free_object,
+       .pin = msm_gem_prime_pin,
+       .unpin = msm_gem_prime_unpin,
+       .get_sg_table = msm_gem_prime_get_sg_table,
+       .vmap = msm_gem_prime_vmap,
+       .vunmap = msm_gem_prime_vunmap,
+       .vm_ops = &vm_ops,
+ };
  static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct drm_gem_object **obj)
        INIT_LIST_HEAD(&msm_obj->vmas);
  
        *obj = &msm_obj->base;
+       (*obj)->funcs = &msm_gem_object_funcs;
  
        return 0;
  }
@@@ -169,6 -169,17 +169,17 @@@ nouveau_gem_object_close(struct drm_gem
        ttm_bo_unreserve(&nvbo->bo);
  }
  
+ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
+       .free = nouveau_gem_object_del,
+       .open = nouveau_gem_object_open,
+       .close = nouveau_gem_object_close,
+       .pin = nouveau_gem_prime_pin,
+       .unpin = nouveau_gem_prime_unpin,
+       .get_sg_table = nouveau_gem_prime_get_sg_table,
+       .vmap = nouveau_gem_prime_vmap,
+       .vunmap = nouveau_gem_prime_vunmap,
+ };
  int
  nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);
  
+       nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
 -              nouveau_bo_ref(NULL, &nvbo);
 +              drm_gem_object_release(&nvbo->bo.base);
 +              kfree(nvbo);
                return ret;
        }
  
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;
  
-       nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
        *pnvbo = nvbo;
        return 0;
  }
@@@ -92,7 -92,7 +92,7 @@@ nouveau_mem_fini(struct nouveau_mem *me
  }
  
  int
- nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
  {
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
                mem->comp = 0;
        }
  
-       if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
-       else            args.dma = tt->dma_address;
+       if (tt->sg)
+               args.sgl = tt->sg->sgl;
+       else
+               args.dma = tt->dma_address;
  
        mutex_lock(&drm->master.lock);
        cli->base.super = true;
@@@ -176,8 -178,6 +178,8 @@@ voi
  nouveau_mem_del(struct ttm_resource *reg)
  {
        struct nouveau_mem *mem = nouveau_mem(reg);
 +      if (!mem)
 +              return;
        nouveau_mem_fini(mem);
        kfree(reg->mm_node);
        reg->mm_node = NULL;
@@@ -487,7 -487,7 +487,7 @@@ static vm_fault_t omap_gem_fault_2d(str
   * vma->vm_private_data points to the GEM object that is backing this
   * mapping.
   */
- vm_fault_t omap_gem_fault(struct vm_fault *vmf)
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
  {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@@ -1089,7 -1089,7 +1089,7 @@@ void omap_gem_describe_objects(struct l
   * Constructor & Destructor
   */
  
- void omap_gem_free_object(struct drm_gem_object *obj)
static void omap_gem_free_object(struct drm_gem_object *obj)
  {
        struct drm_device *dev = obj->dev;
        struct omap_drm_private *priv = dev->dev_private;
@@@ -1169,6 -1169,18 +1169,18 @@@ static bool omap_gem_validate_flags(str
        return true;
  }
  
+ static const struct vm_operations_struct omap_gem_vm_ops = {
+       .fault = omap_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+ };
+ static const struct drm_gem_object_funcs omap_gem_object_funcs = {
+       .free = omap_gem_free_object,
+       .export = omap_gem_prime_export,
+       .vm_ops = &omap_gem_vm_ops,
+ };
  /* GEM buffer object constructor */
  struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                union omap_gem_size gsize, u32 flags)
                size = PAGE_ALIGN(gsize.bytes);
        }
  
+       obj->funcs = &omap_gem_object_funcs;
        /* Initialize the GEM object. */
        if (!(flags & OMAP_BO_MEM_SHMEM)) {
                drm_gem_private_object_init(dev, obj, size);
@@@ -1297,9 -1311,10 +1311,9 @@@ struct drm_gem_object *omap_gem_new_dma
                omap_obj->dma_addr = sg_dma_address(sgt->sgl);
        } else {
                /* Create pages list from sgt */
 -              struct sg_page_iter iter;
                struct page **pages;
                unsigned int npages;
 -              unsigned int i = 0;
 +              unsigned int ret;
  
                npages = DIV_ROUND_UP(size, PAGE_SIZE);
                pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
                }
  
                omap_obj->pages = pages;
 -
 -              for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
 -                      pages[i++] = sg_page_iter_page(&iter);
 -                      if (i > npages)
 -                              break;
 -              }
 -
 -              if (WARN_ON(i != npages)) {
 +              ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
 +                                                     npages);
 +              if (ret) {
                        omap_gem_free_object(obj);
                        obj = ERR_PTR(-ENOMEM);
                        goto done;
@@@ -273,10 -273,7 +273,7 @@@ static void radeon_unpin_work_func(stru
        /* unpin of the old buffer */
        r = radeon_bo_reserve(work->old_rbo, false);
        if (likely(r == 0)) {
-               r = radeon_bo_unpin(work->old_rbo);
-               if (unlikely(r != 0)) {
-                       DRM_ERROR("failed to unpin buffer after flip\n");
-               }
+               radeon_bo_unpin(work->old_rbo);
                radeon_bo_unreserve(work->old_rbo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
@@@ -607,9 -604,7 +604,7 @@@ pflip_cleanup
                DRM_ERROR("failed to reserve new rbo in error path\n");
                goto cleanup;
        }
-       if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
-               DRM_ERROR("failed to unpin new rbo in error path\n");
-       }
+       radeon_bo_unpin(new_rbo);
        radeon_bo_unreserve(new_rbo);
  
  cleanup:
@@@ -933,7 -928,7 +928,7 @@@ static void avivo_get_fb_ref_div(unsign
  
        /* get matching reference and feedback divider */
        *ref_div = min(max(den/post_div, 1u), ref_div_max);
 -      *fb_div = max(nom * *ref_div * post_div / den, 1u);
 +      *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
  
        /* limit fb divider to its maximum */
        if (*fb_div > fb_div_max) {
@@@ -10,6 -10,7 +10,7 @@@
  
  #include <drm/drm.h>
  #include <drm/drm_gem.h>
+ #include <drm/drm_gem_cma_helper.h>
  #include <drm/drm_prime.h>
  #include <drm/drm_vma_manager.h>
  
@@@ -36,8 -37,8 +37,8 @@@ static int rockchip_gem_iommu_map(struc
  
        rk_obj->dma_addr = rk_obj->mm.start;
  
 -      ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
 -                         rk_obj->sgt->nents, prot);
 +      ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
 +                              prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
@@@ -99,10 -100,11 +100,10 @@@ static int rockchip_gem_get_pages(struc
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
 -      for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
 +      for_each_sgtable_sg(rk_obj->sgt, s, i)
                sg_dma_address(s) = sg_phys(s);
  
 -      dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
 -                             DMA_TO_DEVICE);
 +      dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
  
        return 0;
  
@@@ -295,6 -297,14 +296,14 @@@ static void rockchip_gem_release_object
        kfree(rk_obj);
  }
  
+ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
+       .free = rockchip_gem_free_object,
+       .get_sg_table = rockchip_gem_prime_get_sg_table,
+       .vmap = rockchip_gem_prime_vmap,
+       .vunmap = rockchip_gem_prime_vunmap,
+       .vm_ops = &drm_gem_cma_vm_ops,
+ };
  static struct rockchip_gem_object *
        rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
  {
  
        obj = &rk_obj->base;
  
+       obj->funcs = &rockchip_gem_object_funcs;
        drm_gem_object_init(drm, obj, size);
  
        return rk_obj;
@@@ -337,7 -349,7 +348,7 @@@ err_free_rk_obj
  }
  
  /*
-  * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+  * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
   * callback function
   */
  void rockchip_gem_free_object(struct drm_gem_object *obj)
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
 -                      dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
 -                                   rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +                      dma_unmap_sgtable(drm->dev, rk_obj->sgt,
 +                                        DMA_BIDIRECTIONAL, 0);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
@@@ -460,6 -472,23 +471,6 @@@ struct sg_table *rockchip_gem_prime_get
        return sgt;
  }
  
 -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
 -                                                   int count)
 -{
 -      struct scatterlist *s;
 -      dma_addr_t expected = sg_dma_address(sgt->sgl);
 -      unsigned int i;
 -      unsigned long size = 0;
 -
 -      for_each_sg(sgt->sgl, s, count, i) {
 -              if (sg_dma_address(s) != expected)
 -                      break;
 -              expected = sg_dma_address(s) + sg_dma_len(s);
 -              size += sg_dma_len(s);
 -      }
 -      return size;
 -}
 -
  static int
  rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
@@@ -476,13 -505,15 +487,13 @@@ rockchip_gem_dma_map_sg(struct drm_devi
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
  {
 -      int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
 -                             DMA_BIDIRECTIONAL);
 -      if (!count)
 -              return -EINVAL;
 +      int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
 +      if (err)
 +              return err;
  
 -      if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
 +      if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
 -              dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
 -                           DMA_BIDIRECTIONAL);
 +              dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
                return -EINVAL;
        }
  
@@@ -98,8 -98,8 +98,8 @@@ static struct sg_table *tegra_bo_pin(st
                 * the SG table needs to be copied to avoid overwriting any
                 * other potential users of the original SG table.
                 */
 -              err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
 -                                           GFP_KERNEL);
 +              err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
 +                                           obj->sgt->orig_nents, GFP_KERNEL);
                if (err < 0)
                        goto free;
        } else {
@@@ -132,24 -132,29 +132,29 @@@ static void tegra_bo_unpin(struct devic
  static void *tegra_bo_mmap(struct host1x_bo *bo)
  {
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+       struct dma_buf_map map;
+       int ret;
  
-       if (obj->vaddr)
+       if (obj->vaddr) {
                return obj->vaddr;
-       else if (obj->gem.import_attach)
-               return dma_buf_vmap(obj->gem.import_attach->dmabuf);
-       else
+       } else if (obj->gem.import_attach) {
+               ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+               return ret ? NULL : map.vaddr;
+       } else {
                return vmap(obj->pages, obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
+       }
  }
  
  static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
  {
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
  
        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
-               dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+               dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
        else
                vunmap(addr);
  }
@@@ -196,7 -201,8 +201,7 @@@ static int tegra_bo_iommu_map(struct te
  
        bo->iova = bo->mm->start;
  
 -      bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
 -                              bo->sgt->nents, prot);
 +      bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
@@@ -230,6 -236,12 +235,12 @@@ static int tegra_bo_iommu_unmap(struct 
        return 0;
  }
  
+ static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
+       .free = tegra_bo_free_object,
+       .export = tegra_gem_prime_export,
+       .vm_ops = &tegra_bo_vm_ops,
+ };
  static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
  {
        if (!bo)
                return ERR_PTR(-ENOMEM);
  
+       bo->gem.funcs = &tegra_gem_object_funcs;
        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);
  
@@@ -263,7 -277,8 +276,7 @@@ free
  static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
  {
        if (bo->pages) {
 -              dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                           DMA_FROM_DEVICE);
 +              dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
@@@ -288,9 -303,12 +301,9 @@@ static int tegra_bo_get_pages(struct dr
                goto put_pages;
        }
  
 -      err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                       DMA_FROM_DEVICE);
 -      if (err == 0) {
 -              err = -EFAULT;
 +      err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
 +      if (err)
                goto free_sgt;
 -      }
  
        return 0;
  
@@@ -566,7 -584,7 +579,7 @@@ tegra_gem_prime_map_dma_buf(struct dma_
                        goto free;
        }
  
 -      if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
 +      if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                goto free;
  
        return sgt;
@@@ -585,7 -603,7 +598,7 @@@ static void tegra_gem_prime_unmap_dma_b
        struct tegra_bo *bo = to_tegra_bo(gem);
  
        if (bo->pages)
 -              dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 +              dma_unmap_sgtable(attach->dev, sgt, dir, 0);
  
        sg_free_table(sgt);
        kfree(sgt);
@@@ -604,7 -622,8 +617,7 @@@ static int tegra_gem_prime_begin_cpu_ac
        struct drm_device *drm = gem->dev;
  
        if (bo->pages)
 -              dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                                  DMA_FROM_DEVICE);
 +              dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
  
        return 0;
  }
@@@ -617,7 -636,8 +630,7 @@@ static int tegra_gem_prime_end_cpu_acce
        struct drm_device *drm = gem->dev;
  
        if (bo->pages)
 -              dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
  
        return 0;
  }
@@@ -634,15 -654,17 +647,17 @@@ static int tegra_gem_prime_mmap(struct 
        return __tegra_gem_mmap(gem, vma);
  }
  
- static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+ static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
  {
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
  
-       return bo->vaddr;
+       dma_buf_map_set_vaddr(map, bo->vaddr);
+       return 0;
  }
  
- static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+ static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
  {
  }
  
@@@ -115,10 -115,7 +115,7 @@@ static void ttm_bo_add_mem_to_lru(struc
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
  
-       if (!list_empty(&bo->lru))
-               return;
-       if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+       if (!list_empty(&bo->lru) || bo->pin_count)
                return;
  
        man = ttm_manager_type(bdev, mem->mem_type);
@@@ -165,7 -162,7 +162,7 @@@ void ttm_bo_move_to_lru_tail(struct ttm
        ttm_bo_del_from_lru(bo);
        ttm_bo_add_mem_to_lru(bo, &bo->mem);
  
-       if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+       if (bulk && !bo->pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
@@@ -255,49 -252,17 +252,17 @@@ static int ttm_bo_handle_move_mem(struc
                if (ret)
                        goto out_err;
  
-               ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
-               if (ret)
-                       goto out_err;
                if (mem->mem_type != TTM_PL_SYSTEM) {
-                       ret = ttm_tt_populate(bdev, bo->ttm, ctx);
+                       ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
-                       ret = ttm_bo_tt_bind(bo, mem);
-                       if (ret)
-                               goto out_err;
-               }
-               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-                       if (bdev->driver->move_notify)
-                               bdev->driver->move_notify(bo, evict, mem);
-                       bo->mem = *mem;
-                       goto moved;
                }
        }
  
-       if (bdev->driver->move_notify)
-               bdev->driver->move_notify(bo, evict, mem);
-       if (old_man->use_tt && new_man->use_tt)
-               ret = ttm_bo_move_ttm(bo, ctx, mem);
-       else if (bdev->driver->move)
-               ret = bdev->driver->move(bo, evict, ctx, mem);
-       else
-               ret = ttm_bo_move_memcpy(bo, ctx, mem);
-       if (ret) {
-               if (bdev->driver->move_notify) {
-                       swap(*mem, bo->mem);
-                       bdev->driver->move_notify(bo, false, mem);
-                       swap(*mem, bo->mem);
-               }
+       ret = bdev->driver->move(bo, evict, ctx, mem);
+       if (ret)
                goto out_err;
-       }
  
- moved:
        ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
        return 0;
  
@@@ -319,8 -284,8 +284,8 @@@ out_err
  
  static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
  {
-       if (bo->bdev->driver->move_notify)
-               bo->bdev->driver->move_notify(bo, false, NULL);
+       if (bo->bdev->driver->delete_mem_notify)
+               bo->bdev->driver->delete_mem_notify(bo);
  
        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->mem);
@@@ -540,12 -505,12 +505,12 @@@ static void ttm_bo_release(struct kref 
                spin_lock(&ttm_bo_glob.lru_lock);
  
                /*
-                * Make NO_EVICT bos immediately available to
+                * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 */
-               if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-                       bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+               if (bo->pin_count) {
+                       bo->pin_count = 0;
                        ttm_bo_del_from_lru(bo);
                        ttm_bo_add_mem_to_lru(bo, &bo->mem);
                }
@@@ -647,7 -612,7 +612,7 @@@ bool ttm_bo_eviction_valuable(struct tt
        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
 -      if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
 +      if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
            (place->lpfn && place->lpfn <= bo->mem.start))
                return false;
  
@@@ -860,35 -825,11 +825,11 @@@ static int ttm_bo_mem_force_space(struc
        return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
  }
  
- static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
-                                     uint32_t cur_placement,
-                                     uint32_t proposed_placement)
- {
-       uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
-       uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
-       /**
-        * Keep current caching if possible.
-        */
-       if ((cur_placement & caching) != 0)
-               result |= (cur_placement & caching);
-       else if ((TTM_PL_FLAG_CACHED & caching) != 0)
-               result |= TTM_PL_FLAG_CACHED;
-       else if ((TTM_PL_FLAG_WC & caching) != 0)
-               result |= TTM_PL_FLAG_WC;
-       else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
-               result |= TTM_PL_FLAG_UNCACHED;
-       return result;
- }
  /**
   * ttm_bo_mem_placement - check if placement is compatible
   * @bo: BO to find memory for
   * @place: where to search
   * @mem: the memory object to fill in
-  * @ctx: operation context
   *
   * Check if placement is compatible and fill in mem structure.
   * Returns -EBUSY if placement won't work or negative error code.
   */
  static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
                                const struct ttm_place *place,
-                               struct ttm_resource *mem,
-                               struct ttm_operation_ctx *ctx)
+                               struct ttm_resource *mem)
  {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
-       uint32_t cur_flags = 0;
  
        man = ttm_manager_type(bdev, place->mem_type);
        if (!man || !ttm_resource_manager_used(man))
                return -EBUSY;
  
-       cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-                                         place->flags);
-       cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
        mem->mem_type = place->mem_type;
-       mem->placement = cur_flags;
+       mem->placement = place->flags;
  
        spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_del_from_lru(bo);
@@@ -947,7 -882,7 +882,7 @@@ int ttm_bo_mem_space(struct ttm_buffer_
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;
  
-               ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+               ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;
  
        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
  
-               ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+               ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;
  
@@@ -1045,8 -980,7 +980,7 @@@ static bool ttm_bo_places_compat(const 
                        continue;
  
                *new_flags = heap->flags;
-               if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
-                   (mem->mem_type == heap->mem_type) &&
+               if ((mem->mem_type == heap->mem_type) &&
                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
@@@ -1100,9 -1034,6 +1034,6 @@@ int ttm_bo_validate(struct ttm_buffer_o
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
-       } else {
-               bo->mem.placement &= TTM_PL_MASK_CACHING;
-               bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
        }
        /*
         * We might need to add a TTM.
@@@ -1170,8 -1101,9 +1101,9 @@@ int ttm_bo_init_reserved(struct ttm_bo_
        bo->mem.bus.offset = 0;
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
-       bo->mem.placement = TTM_PL_FLAG_CACHED;
+       bo->mem.placement = 0;
        bo->acc_size = acc_size;
+       bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
@@@ -1251,19 -1183,6 +1183,6 @@@ int ttm_bo_init(struct ttm_bo_device *b
  }
  EXPORT_SYMBOL(ttm_bo_init);
  
- static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
-                             unsigned long bo_size,
-                             unsigned struct_size)
- {
-       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
-       size_t size = 0;
-       size += ttm_round_pot(struct_size);
-       size += ttm_round_pot(npages * sizeof(void *));
-       size += ttm_round_pot(sizeof(struct ttm_tt));
-       return size;
- }
  size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
  
        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-       size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+       size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
  }
  EXPORT_SYMBOL(ttm_bo_dma_acc_size);
  
- int ttm_bo_create(struct ttm_bo_device *bdev,
-                       unsigned long size,
-                       enum ttm_bo_type type,
-                       struct ttm_placement *placement,
-                       uint32_t page_alignment,
-                       bool interruptible,
-                       struct ttm_buffer_object **p_bo)
- {
-       struct ttm_buffer_object *bo;
-       size_t acc_size;
-       int ret;
-       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-       if (unlikely(bo == NULL))
-               return -ENOMEM;
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-       ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-                         interruptible, acc_size,
-                         NULL, NULL, NULL);
-       if (likely(ret == 0))
-               *p_bo = bo;
-       return ret;
- }
- EXPORT_SYMBOL(ttm_bo_create);
- int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
- {
-       struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
-       if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
-               pr_err("Illegal memory manager memory type %u\n", mem_type);
-               return -EINVAL;
-       }
-       if (!man) {
-               pr_err("Memory type %u has not been initialized\n", mem_type);
-               return 0;
-       }
-       return ttm_resource_manager_force_list_clean(bdev, man);
- }
- EXPORT_SYMBOL(ttm_bo_evict_mm);
  static void ttm_bo_global_kobj_release(struct kobject *kobj)
  {
        struct ttm_bo_global *glob =
@@@ -1506,8 -1380,9 +1380,9 @@@ EXPORT_SYMBOL(ttm_bo_wait)
   * A buffer object shrink method that tries to swap out the first
   * buffer object on the bo_global::swap_lru list.
   */
- int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
+ int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
  {
+       struct ttm_bo_global *glob = &ttm_bo_glob;
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        bool locked;
         * Move to system cached
         */
  
-       if (bo->mem.mem_type != TTM_PL_SYSTEM ||
-           bo->ttm->caching_state != tt_cached) {
+       if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource evict_mem;
  
                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
-               evict_mem.placement = TTM_PL_FLAG_CACHED;
+               evict_mem.placement = 0;
                evict_mem.mem_type = TTM_PL_SYSTEM;
  
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);
  
-       ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
+       ret = ttm_tt_swapout(bo->bdev, bo->ttm);
  out:
  
        /**
  }
  EXPORT_SYMBOL(ttm_bo_swapout);
  
- void ttm_bo_swapout_all(void)
- {
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
-       while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
- }
- EXPORT_SYMBOL(ttm_bo_swapout_all);
  void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
  {
        if (bo->ttm == NULL)
        bo->ttm = NULL;
  }
  
- int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
- {
-       return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
- }
- void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
- {
-       bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
- }
@@@ -472,8 -472,10 +472,10 @@@ int vc4_crtc_disable_at_boot(struct drm
  }
  
  static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *old_state)
+                                   struct drm_atomic_state *state)
  {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
        struct drm_device *dev = crtc->dev;
  
  }
  
  static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_state)
+                                  struct drm_atomic_state *state)
  {
+       struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+                                                                        crtc);
        struct drm_device *dev = crtc->dev;
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
        struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@@ -852,19 -856,11 +856,19 @@@ void vc4_crtc_destroy_state(struct drm_
  
  void vc4_crtc_reset(struct drm_crtc *crtc)
  {
 +      struct vc4_crtc_state *vc4_crtc_state;
 +
        if (crtc->state)
                vc4_crtc_destroy_state(crtc, crtc->state);
 -      crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 -      if (crtc->state)
 -              __drm_atomic_helper_crtc_reset(crtc, crtc->state);
 +
 +      vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
 +      if (!vc4_crtc_state) {
 +              crtc->state = NULL;
 +              return;
 +      }
 +
 +      vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
 +      __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
  }
  
  static const struct drm_crtc_funcs vc4_crtc_funcs = {
@@@ -140,12 -140,6 +140,6 @@@ static void vc4_close(struct drm_devic
        kfree(vc4file);
  }
  
- static const struct vm_operations_struct vc4_vm_ops = {
-       .fault = vc4_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
- };
  static const struct file_operations vc4_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@@ -195,16 -189,10 +189,10 @@@ static struct drm_driver vc4_drm_drive
  #endif
  
        .gem_create_object = vc4_create_object,
-       .gem_free_object_unlocked = vc4_free_object,
-       .gem_vm_ops = &vc4_vm_ops,
  
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export = vc4_prime_export,
-       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = vc4_prime_import_sg_table,
-       .gem_prime_vmap = vc4_prime_vmap,
-       .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap = vc4_prime_mmap,
  
        .dumb_create = vc4_dumb_create,
@@@ -314,7 -302,6 +302,7 @@@ unbind_all
        component_unbind_all(dev, drm);
  gem_destroy:
        vc4_gem_destroy(drm);
 +      drm_mode_config_cleanup(drm);
        vc4_bo_cache_destroy(drm);
  dev_put:
        drm_dev_put(drm);
@@@ -287,7 -287,7 +287,7 @@@ struct vc4_bo 
  static inline struct vc4_bo *
  to_vc4_bo(struct drm_gem_object *bo)
  {
 -      return (struct vc4_bo *)bo;
 +      return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
  }
  
  struct vc4_fence {
  static inline struct vc4_fence *
  to_vc4_fence(struct dma_fence *fence)
  {
 -      return (struct vc4_fence *)fence;
 +      return container_of(fence, struct vc4_fence, base);
  }
  
  struct vc4_seqno_cb {
@@@ -347,7 -347,7 +347,7 @@@ struct vc4_plane 
  static inline struct vc4_plane *
  to_vc4_plane(struct drm_plane *plane)
  {
 -      return (struct vc4_plane *)plane;
 +      return container_of(plane, struct vc4_plane, base);
  }
  
  enum vc4_scaling_mode {
@@@ -423,7 -423,7 +423,7 @@@ struct vc4_plane_state 
  static inline struct vc4_plane_state *
  to_vc4_plane_state(struct drm_plane_state *state)
  {
 -      return (struct vc4_plane_state *)state;
 +      return container_of(state, struct vc4_plane_state, base);
  }
  
  enum vc4_encoder_type {
@@@ -499,7 -499,7 +499,7 @@@ struct vc4_crtc 
  static inline struct vc4_crtc *
  to_vc4_crtc(struct drm_crtc *crtc)
  {
 -      return (struct vc4_crtc *)crtc;
 +      return container_of(crtc, struct vc4_crtc, base);
  }
  
  static inline const struct vc4_crtc_data *
@@@ -532,12 -532,10 +532,12 @@@ struct vc4_crtc_state 
        } margins;
  };
  
 +#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
 +
  static inline struct vc4_crtc_state *
  to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
  {
 -      return (struct vc4_crtc_state *)crtc_state;
 +      return container_of(crtc_state, struct vc4_crtc_state, base);
  }
  
  #define V3D_READ(offset) readl(vc4->v3d->regs + offset)
@@@ -801,7 -799,6 +801,6 @@@ int vc4_get_hang_state_ioctl(struct drm
                             struct drm_file *file_priv);
  int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
- vm_fault_t vc4_fault(struct vm_fault *vmf);
  int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
  int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
  struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
@@@ -31,8 -31,7 +31,7 @@@
  static int virtio_gpu_virglrenderer_workaround = 1;
  module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
  
- static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-                                      uint32_t *resid)
+ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
  {
        if (virtio_gpu_virglrenderer_workaround) {
                /*
@@@ -72,8 -71,9 +71,8 @@@ void virtio_gpu_cleanup_object(struct v
  
                if (shmem->pages) {
                        if (shmem->mapped) {
 -                              dma_unmap_sg(vgdev->vdev->dev.parent,
 -                                           shmem->pages->sgl, shmem->mapped,
 -                                           DMA_TO_DEVICE);
 +                              dma_unmap_sgtable(vgdev->vdev->dev.parent,
 +                                           shmem->pages, DMA_TO_DEVICE, 0);
                                shmem->mapped = 0;
                        }
  
                }
  
                drm_gem_shmem_free_object(&bo->base.base);
+       } else if (virtio_gpu_is_vram(bo)) {
+               struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+               spin_lock(&vgdev->host_visible_lock);
+               if (drm_mm_node_allocated(&vram->vram_node))
+                       drm_mm_remove_node(&vram->vram_node);
+               spin_unlock(&vgdev->host_visible_lock);
+               drm_gem_free_mmap_offset(&vram->base.base.base);
+               drm_gem_object_release(&vram->base.base.base);
+               kfree(vram);
        }
  }
  
@@@ -107,6 -119,7 +118,7 @@@ static const struct drm_gem_object_func
        .close = virtio_gpu_gem_object_close,
  
        .print_info = drm_gem_shmem_print_info,
+       .export = virtgpu_gem_prime_export,
        .pin = drm_gem_shmem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
@@@ -163,13 -176,13 +175,13 @@@ static int virtio_gpu_object_shmem_init
        }
  
        if (use_dma_api) {
 -              shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
 -                                         shmem->pages->sgl,
 -                                         shmem->pages->nents,
 -                                         DMA_TO_DEVICE);
 -              *nents = shmem->mapped;
 +              ret = dma_map_sgtable(vgdev->vdev->dev.parent,
 +                                    shmem->pages, DMA_TO_DEVICE, 0);
 +              if (ret)
 +                      return ret;
 +              *nents = shmem->mapped = shmem->pages->nents;
        } else {
 -              *nents = shmem->pages->nents;
 +              *nents = shmem->pages->orig_nents;
        }
  
        *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
                return -ENOMEM;
        }
  
 -      for_each_sg(shmem->pages->sgl, sg, *nents, si) {
 -              (*ents)[si].addr = cpu_to_le64(use_dma_api
 -                                             ? sg_dma_address(sg)
 -                                             : sg_phys(sg));
 -              (*ents)[si].length = cpu_to_le32(sg->length);
 -              (*ents)[si].padding = 0;
 +      if (use_dma_api) {
 +              for_each_sgtable_dma_sg(shmem->pages, sg, si) {
 +                      (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
 +                      (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
 +                      (*ents)[si].padding = 0;
 +              }
 +      } else {
 +              for_each_sgtable_sg(shmem->pages, sg, si) {
 +                      (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
 +                      (*ents)[si].length = cpu_to_le32(sg->length);
 +                      (*ents)[si].padding = 0;
 +              }
        }
 +
        return 0;
  }
  
@@@ -234,21 -240,24 +246,24 @@@ int virtio_gpu_object_create(struct vir
                        goto err_put_objs;
        }
  
-       if (params->virgl) {
-               virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
-                                                 objs, fence);
-       } else {
-               virtio_gpu_cmd_create_resource(vgdev, bo, params,
-                                              objs, fence);
-       }
        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
        if (ret != 0) {
                virtio_gpu_free_object(&shmem_obj->base);
                return ret;
        }
  
-       virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       if (params->blob) {
+               virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
+                                                   ents, nents);
+       } else if (params->virgl) {
+               virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+                                                 objs, fence);
+               virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       } else {
+               virtio_gpu_cmd_create_resource(vgdev, bo, params,
+                                              objs, fence);
+               virtio_gpu_object_attach(vgdev, bo, ents, nents);
+       }
  
        *bo_ptr = bo;
        return 0;
@@@ -302,7 -302,7 +302,7 @@@ static struct sg_table *vmalloc_to_sgt(
                return NULL;
        }
  
 -      for_each_sg(sgt->sgl, sg, *sg_ents, i) {
 +      for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
@@@ -608,8 -608,9 +608,8 @@@ void virtio_gpu_cmd_transfer_to_host_2d
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
  
        if (use_dma_api)
 -              dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 -                                     shmem->pages->sgl, shmem->pages->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 +                                          shmem->pages, DMA_TO_DEVICE);
  
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
@@@ -1016,6 -1017,8 +1016,8 @@@ virtio_gpu_cmd_resource_create_3d(struc
  void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride,
+                                       uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-       struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
  
-       if (use_dma_api)
+       if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+               struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 -
 -              dma_sync_sg_for_device(vgdev->vdev->dev.parent,
 -                                     shmem->pages->sgl, shmem->pages->nents,
 -                                     DMA_TO_DEVICE);
 +              dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 +                                          shmem->pages, DMA_TO_DEVICE);
+       }
  
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
  
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
  }
  void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride,
+                                         uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
  
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
  }
@@@ -1125,14 -1137,14 +1134,14 @@@ static void virtio_gpu_cmd_resource_uui
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);
  
        spin_lock(&vgdev->resource_export_lock);
-       WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+       WARN_ON(obj->uuid_state != STATE_INITIALIZING);
  
        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
-           obj->uuid_state == UUID_INITIALIZING) {
-               memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
-               obj->uuid_state = UUID_INITIALIZED;
+           obj->uuid_state == STATE_INITIALIZING) {
+               import_uuid(&obj->uuid, resp->uuid);
+               obj->uuid_state = STATE_OK;
        } else {
-               obj->uuid_state = UUID_INITIALIZATION_FAILED;
+               obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);
  
@@@ -1151,7 -1163,7 +1160,7 @@@ virtio_gpu_cmd_resource_assign_uuid(str
        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
-               bo->uuid_state = UUID_INITIALIZATION_FAILED;
+               bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
  }
+ static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+                                          struct virtio_gpu_vbuffer *vbuf)
+ {
+       struct virtio_gpu_object *bo =
+               gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+       struct virtio_gpu_resp_map_info *resp =
+               (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+       spin_lock(&vgdev->host_visible_lock);
+       if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+               vram->map_info = resp->map_info;
+               vram->map_state = STATE_OK;
+       } else {
+               vram->map_state = STATE_ERR;
+       }
+       spin_unlock(&vgdev->host_visible_lock);
+       wake_up_all(&vgdev->resp_wq);
+ }
+ int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+                      struct virtio_gpu_object_array *objs, uint64_t offset)
+ {
+       struct virtio_gpu_resource_map_blob *cmd_p;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       struct virtio_gpu_vbuffer *vbuf;
+       struct virtio_gpu_resp_map_info *resp_buf;
+       resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+       if (!resp_buf) {
+               virtio_gpu_array_put_free(objs);
+               return -ENOMEM;
+       }
+       cmd_p = virtio_gpu_alloc_cmd_resp
+               (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+                sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+       memset(cmd_p, 0, sizeof(*cmd_p));
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->offset = cpu_to_le64(offset);
+       vbuf->objs = objs;
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       return 0;
+ }
+ void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+                         struct virtio_gpu_object *bo)
+ {
+       struct virtio_gpu_resource_unmap_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
+ void
+ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo,
+                                   struct virtio_gpu_object_params *params,
+                                   struct virtio_gpu_mem_entry *ents,
+                                   uint32_t nents)
+ {
+       struct virtio_gpu_resource_create_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+       cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+       cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+       cmd_p->blob_id = cpu_to_le64(params->blob_id);
+       cmd_p->size = cpu_to_le64(params->size);
+       cmd_p->nr_entries = cpu_to_le32(nents);
+       vbuf->data_buf = ents;
+       vbuf->data_size = sizeof(*ents) * nents;
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       bo->created = true;
+ }
+ void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+                                    uint32_t scanout_id,
+                                    struct virtio_gpu_object *bo,
+                                    struct drm_framebuffer *fb,
+                                    uint32_t width, uint32_t height,
+                                    uint32_t x, uint32_t y)
+ {
+       uint32_t i;
+       struct virtio_gpu_set_scanout_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+       uint32_t format = virtio_gpu_translate_format(fb->format->format);
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->scanout_id = cpu_to_le32(scanout_id);
+       cmd_p->format = cpu_to_le32(format);
+       cmd_p->width  = cpu_to_le32(fb->width);
+       cmd_p->height = cpu_to_le32(fb->height);
+       for (i = 0; i < 4; i++) {
+               cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+               cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+       }
+       cmd_p->r.width = cpu_to_le32(width);
+       cmd_p->r.height = cpu_to_le32(height);
+       cmd_p->r.x = cpu_to_le32(x);
+       cmd_p->r.y = cpu_to_le32(y);
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
@@@ -60,7 -60,7 +60,7 @@@ static int vmw_gmrid_man_get_node(struc
  
        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0)
 -              return (id != -ENOMEM ? 0 : id);
 +              return id;
  
        spin_lock(&gman->lock);
  
@@@ -143,7 -143,7 +143,7 @@@ void vmw_gmrid_man_fini(struct vmw_priv
  
        ttm_resource_manager_set_used(man, false);
  
-       ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+       ttm_resource_manager_evict_all(&dev_priv->bdev, man);
  
        ttm_resource_manager_cleanup(man);
  
@@@ -26,8 -26,6 +26,8 @@@ static struct vmw_thp_manager *to_thp_m
        return container_of(man, struct vmw_thp_manager, manager);
  }
  
 +static const struct ttm_resource_manager_func vmw_thp_func;
 +
  static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
@@@ -103,7 -101,7 +103,7 @@@ found_unlock
                mem->start = node->start;
        }
  
 -      return 0;
 +      return ret;
  }
  
  
@@@ -134,7 -132,6 +134,7 @@@ int vmw_thp_init(struct vmw_private *de
        ttm_resource_manager_init(&rman->manager,
                                  dev_priv->vram_size >> PAGE_SHIFT);
  
 +      rman->manager.func = &vmw_thp_func;
        drm_mm_init(&rman->mm, 0, rman->manager.size);
        spin_lock_init(&rman->lock);
  
@@@ -152,7 -149,7 +152,7 @@@ void vmw_thp_fini(struct vmw_private *d
  
        ttm_resource_manager_set_used(man, false);
  
-       ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+       ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
        if (ret)
                return;
        spin_lock(&rman->lock);
@@@ -174,7 -171,7 +174,7 @@@ static void vmw_thp_debug(struct ttm_re
        spin_unlock(&rman->lock);
  }
  
 -const struct ttm_resource_manager_func vmw_thp_func = {
 +static const struct ttm_resource_manager_func vmw_thp_func = {
        .alloc = vmw_thp_get_node,
        .free = vmw_thp_put_node,
        .debug = vmw_thp_debug
@@@ -34,56 -34,28 +34,28 @@@ static const struct ttm_place vram_plac
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_VRAM,
-       .flags = TTM_PL_FLAG_CACHED
- };
- static const struct ttm_place vram_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = TTM_PL_VRAM,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
  };
  
  static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_SYSTEM,
-       .flags = TTM_PL_FLAG_CACHED
- };
- static const struct ttm_place sys_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = TTM_PL_SYSTEM,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
  };
  
  static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_GMR,
-       .flags = TTM_PL_FLAG_CACHED
- };
- static const struct ttm_place gmr_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = VMW_PL_GMR,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
  };
  
  static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_MOB,
-       .flags = TTM_PL_FLAG_CACHED
- };
- static const struct ttm_place mob_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .mem_type = VMW_PL_MOB,
-       .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+       .flags = 0
  };
  
  struct ttm_placement vmw_vram_placement = {
@@@ -98,12 -70,12 +70,12 @@@ static const struct ttm_place vram_gmr_
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
  };
  
@@@ -112,12 -84,12 +84,12 @@@ static const struct ttm_place gmr_vram_
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
  };
  
@@@ -128,29 -100,6 +100,6 @@@ struct ttm_placement vmw_vram_gmr_place
        .busy_placement = &gmr_placement_flags
  };
  
- static const struct ttm_place vram_gmr_ne_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }
- };
- struct ttm_placement vmw_vram_gmr_ne_placement = {
-       .num_placement = 2,
-       .placement = vram_gmr_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &gmr_ne_placement_flags
- };
  struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .busy_placement = &sys_placement_flags
  };
  
- struct ttm_placement vmw_vram_ne_placement = {
-       .num_placement = 1,
-       .placement = &vram_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &vram_ne_placement_flags
- };
  struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .busy_placement = &sys_placement_flags
  };
  
- struct ttm_placement vmw_sys_ne_placement = {
-       .num_placement = 1,
-       .placement = &sys_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_ne_placement_flags
- };
  static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
  };
  
@@@ -208,17 -143,17 +143,17 @@@ static const struct ttm_place nonfixed_
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
-               .flags = TTM_PL_FLAG_CACHED
+               .flags = 0
        }
  };
  
@@@ -243,13 -178,6 +178,6 @@@ struct ttm_placement vmw_mob_placement 
        .busy_placement = &mob_placement_flags
  };
  
- struct ttm_placement vmw_mob_ne_placement = {
-       .num_placement = 1,
-       .num_busy_placement = 1,
-       .placement = &mob_ne_placement_flags,
-       .busy_placement = &mob_ne_placement_flags
- };
  struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
  };
  
  struct vmw_ttm_tt {
-       struct ttm_dma_tt dma_ttm;
+       struct ttm_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
@@@ -384,7 -312,8 +312,7 @@@ static void vmw_ttm_unmap_from_dma(stru
  {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
  
 -      dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 -              DMA_BIDIRECTIONAL);
 +      dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
  }
  
  static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
  {
        struct device *dev = vmw_tt->dev_priv->dev->dev;
 -      int ret;
  
 -      ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
 -                       DMA_BIDIRECTIONAL);
 -      if (unlikely(ret == 0))
 -              return -ENOMEM;
 -
 -      vmw_tt->sgt.nents = ret;
 -
 -      return 0;
 +      return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /**
@@@ -432,14 -369,13 +360,14 @@@ static int vmw_ttm_map_dma(struct vmw_t
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;
 +      struct scatterlist *sg;
  
        if (vmw_tt->mapped)
                return 0;
  
        vsgt->mode = dev_priv->map_mode;
-       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+       vsgt->pages = vmw_tt->dma_ttm.pages;
+       vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;
  
                if (unlikely(ret != 0))
                        return ret;
  
 -              ret = __sg_alloc_table_from_pages
 -                      (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
 -                       (unsigned long) vsgt->num_pages << PAGE_SHIFT,
 -                       dma_get_max_seg_size(dev_priv->dev->dev),
 -                       GFP_KERNEL);
 -              if (unlikely(ret != 0))
 +              sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
 +                              vsgt->num_pages, 0,
 +                              (unsigned long) vsgt->num_pages << PAGE_SHIFT,
 +                              dma_get_max_seg_size(dev_priv->dev->dev),
 +                              NULL, 0, GFP_KERNEL);
 +              if (IS_ERR(sg)) {
 +                      ret = PTR_ERR(sg);
                        goto out_sg_alloc_fail;
 +              }
  
 -              if (vsgt->num_pages > vmw_tt->sgt.nents) {
 +              if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
 -                                          vmw_tt->sgt.nents);
 +                                          vmw_tt->sgt.orig_nents);
  
                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
@@@ -549,7 -483,7 +477,7 @@@ static void vmw_ttm_unmap_dma(struct vm
  const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
  {
        struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
  
        return &vmw_tt->vsgt;
  }
@@@ -559,7 -493,7 +487,7 @@@ static int vmw_ttm_bind(struct ttm_bo_d
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
  {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        int ret = 0;
  
        if (!bo_mem)
@@@ -603,7 -537,7 +531,7 @@@ static void vmw_ttm_unbind(struct ttm_b
                           struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
  
        if (!vmw_be->bound)
                return;
  static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
  
        vmw_ttm_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ttm_dma_tt_fini(&vmw_be->dma_ttm);
+               ttm_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);
  
@@@ -649,7 -583,7 +577,7 @@@ static int vmw_ttm_populate(struct ttm_
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
  {
        struct vmw_ttm_tt *vmw_tt =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;
@@@ -678,7 -612,7 +606,7 @@@ static void vmw_ttm_unpopulate(struct t
                               struct ttm_tt *ttm)
  {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-                                                dma_ttm.ttm);
+                                                dma_ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
  
@@@ -713,13 -647,15 +641,15 @@@ static struct ttm_tt *vmw_ttm_tt_create
        vmw_be->mob = NULL;
  
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+                                     ttm_cached);
        else
-               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+               ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+                                 ttm_cached);
        if (unlikely(ret != 0))
                goto out_no_init;
  
-       return &vmw_be->dma_ttm.ttm;
+       return &vmw_be->dma_ttm;
  out_no_init:
        kfree(vmw_be);
        return NULL;
@@@ -752,6 -688,7 +682,7 @@@ static int vmw_ttm_io_mem_reserve(struc
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        dev_priv->vram_start;
                mem->bus.is_iomem = true;
+               mem->bus.caching = ttm_cached;
                break;
        default:
                return -EINVAL;
@@@ -773,6 -710,8 +704,8 @@@ static void vmw_move_notify(struct ttm_
                            bool evict,
                            struct ttm_resource *mem)
  {
+       if (!mem)
+               return;
        vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
  }
@@@ -789,19 -728,65 +722,65 @@@ static void vmw_swap_notify(struct ttm_
        (void) ttm_bo_wait(bo, false, false);
  }
  
+ static int vmw_move(struct ttm_buffer_object *bo,
+                   bool evict,
+                   struct ttm_operation_ctx *ctx,
+                   struct ttm_resource *new_mem)
+ {
+       struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+       struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+       int ret;
+       if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+               ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
+               if (ret)
+                       return ret;
+       }
+       vmw_move_notify(bo, evict, new_mem);
+       if (old_man->use_tt && new_man->use_tt) {
+               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+                       ttm_bo_assign_mem(bo, new_mem);
+                       return 0;
+               }
+               ret = ttm_bo_wait_ctx(bo, ctx);
+               if (ret)
+                       goto fail;
+               vmw_ttm_unbind(bo->bdev, bo->ttm);
+               ttm_resource_free(bo, &bo->mem);
+               ttm_bo_assign_mem(bo, new_mem);
+               return 0;
+       } else {
+               ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+               if (ret)
+                       goto fail;
+       }
+       return 0;
+ fail:
+       swap(*new_mem, bo->mem);
+       vmw_move_notify(bo, false, new_mem);
+       swap(*new_mem, bo->mem);
+       return ret;
+ }
+ static void
+ vmw_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+       vmw_move_notify(bo, false, NULL);
+ }
  
  struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
-       .ttm_tt_bind = &vmw_ttm_bind,
-       .ttm_tt_unbind = &vmw_ttm_unbind,
        .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
-       .move = NULL,
+       .move = vmw_move,
        .verify_access = vmw_verify_access,
-       .move_notify = vmw_move_notify,
+       .delete_mem_notify = vmw_delete_mem_notify,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
  };
@@@ -817,11 -802,9 +796,9 @@@ int vmw_bo_create_and_populate(struct v
        struct ttm_buffer_object *bo;
        int ret;
  
-       ret = ttm_bo_create(&dev_priv->bdev, bo_size,
-                           ttm_bo_type_device,
-                           &vmw_sys_ne_placement,
-                           0, false, &bo);
+       ret = vmw_bo_create_kernel(dev_priv, bo_size,
+                                  &vmw_sys_placement,
+                                  &bo);
        if (unlikely(ret != 0))
                return ret;
  
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
-                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }
  
@@@ -57,6 -57,19 +57,19 @@@ static void gem_free_pages_array(struc
        xen_obj->pages = NULL;
  }
  
+ static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+       .open           = drm_gem_vm_open,
+       .close          = drm_gem_vm_close,
+ };
+ static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
+       .free = xen_drm_front_gem_object_free,
+       .get_sg_table = xen_drm_front_gem_get_sg_table,
+       .vmap = xen_drm_front_gem_prime_vmap,
+       .vunmap = xen_drm_front_gem_prime_vunmap,
+       .vm_ops = &xen_drm_drv_vm_ops,
+ };
  static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
                                             size_t size)
  {
@@@ -67,6 -80,8 +80,8 @@@
        if (!xen_obj)
                return ERR_PTR(-ENOMEM);
  
+       xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;
        ret = drm_gem_object_init(dev, &xen_obj->base, size);
        if (ret < 0) {
                kfree(xen_obj);
@@@ -218,7 -233,7 +233,7 @@@ xen_drm_front_gem_import_sg_table(struc
                return ERR_PTR(ret);
  
        DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
 -                size, sgt->nents);
 +                size, sgt->orig_nents);
  
        return &xen_obj->base;
  }
@@@ -42,6 -42,11 +42,6 @@@ struct vb2_dc_buf 
        struct dma_buf_attachment       *db_attach;
  };
  
 -static inline bool vb2_dc_buffer_consistent(unsigned long attr)
 -{
 -      return !(attr & DMA_ATTR_NON_CONSISTENT);
 -}
 -
  /*********************************************/
  /*        scatterlist table functions        */
  /*********************************************/
@@@ -53,10 -58,10 +53,10 @@@ static unsigned long vb2_dc_get_contigu
        unsigned int i;
        unsigned long size = 0;
  
 -      for_each_sg(sgt->sgl, s, sgt->nents, i) {
 +      for_each_sgtable_dma_sg(sgt, s, i) {
                if (sg_dma_address(s) != expected)
                        break;
 -              expected = sg_dma_address(s) + sg_dma_len(s);
 +              expected += sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
@@@ -76,9 -81,13 +76,13 @@@ static void *vb2_dc_cookie(void *buf_pr
  static void *vb2_dc_vaddr(void *buf_priv)
  {
        struct vb2_dc_buf *buf = buf_priv;
+       struct dma_buf_map map;
+       int ret;
  
-       if (!buf->vaddr && buf->db_attach)
-               buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+       if (!buf->vaddr && buf->db_attach) {
+               ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+               buf->vaddr = ret ? NULL : map.vaddr;
+       }
  
        return buf->vaddr;
  }
@@@ -98,7 -107,8 +102,7 @@@ static void vb2_dc_prepare(void *buf_pr
        if (!sgt)
                return;
  
 -      dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
 -                             buf->dma_dir);
 +      dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
  }
  
  static void vb2_dc_finish(void *buf_priv)
        if (!sgt)
                return;
  
 -      dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
 +      dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
  }
  
  /*********************************************/
@@@ -269,8 -279,8 +273,8 @@@ static void vb2_dc_dmabuf_ops_detach(st
                 * memory locations do not require any explicit cache
                 * maintenance prior or after being used by the device.
                 */
 -              dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                                 attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
 +                                DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
@@@ -295,8 -305,8 +299,8 @@@ static struct sg_table *vb2_dc_dmabuf_o
  
        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
 -              dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                                 attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
 +                                DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }
  
         * mapping to the client with new direction, no cache sync
         * required see comment in vb2_dc_dmabuf_ops_detach()
         */
 -      sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                                    dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 -      if (!sgt->nents) {
 +      if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
 +                          DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
@@@ -334,6 -345,13 +338,6 @@@ static in
  vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                   enum dma_data_direction direction)
  {
 -      struct vb2_dc_buf *buf = dbuf->priv;
 -      struct sg_table *sgt = buf->dma_sgt;
 -
 -      if (vb2_dc_buffer_consistent(buf->attrs))
 -              return 0;
 -
 -      dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
  }
  
@@@ -341,14 -359,23 +345,16 @@@ static in
  vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                 enum dma_data_direction direction)
  {
 -      struct vb2_dc_buf *buf = dbuf->priv;
 -      struct sg_table *sgt = buf->dma_sgt;
 -
 -      if (vb2_dc_buffer_consistent(buf->attrs))
 -              return 0;
 -
 -      dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
  }
  
- static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+ static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
  {
        struct vb2_dc_buf *buf = dbuf->priv;
  
-       return buf->vaddr;
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+       return 0;
  }
  
  static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@@ -434,8 -461,8 +440,8 @@@ static void vb2_dc_put_userptr(void *bu
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
 -              dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                                 buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 +              dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
 +                                DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
@@@ -532,8 -559,9 +538,8 @@@ static void *vb2_dc_get_userptr(struct 
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
 -      sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                                    buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 -      if (sgt->nents <= 0) {
 +      if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
 +                          DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
@@@ -555,7 -583,8 +561,7 @@@ out
        return buf;
  
  fail_map_sg:
 -      dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                         buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 +      dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
  
  fail_sgt_init:
        sg_free_table(sgt);
@@@ -619,6 -648,7 +625,7 @@@ static void vb2_dc_unmap_dmabuf(void *m
  {
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
  
        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
        }
  
        if (buf->vaddr) {
-               dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+               dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
@@@ -123,7 -123,8 +123,7 @@@ static void *vb2_dma_sg_alloc(struct de
        /*
         * NOTE: dma-sg allocates memory using the page allocator directly, so
         * there is no memory consistency guarantee, hence dma-sg ignores DMA
 -       * attributes passed from the upper layer. That means that
 -       * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
 +       * attributes passed from the upper layer.
         */
        buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
                                    GFP_KERNEL | __GFP_ZERO);
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
 -      sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                                    buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 -      if (!sgt->nents)
 +      if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
 +                          DMA_ATTR_SKIP_CPU_SYNC))
                goto fail_map;
  
        buf->handler.refcount = &buf->refcount;
@@@ -184,8 -186,8 +184,8 @@@ static void vb2_dma_sg_put(void *buf_pr
        if (refcount_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
 -              dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                                 buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 +              dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
 +                                DMA_ATTR_SKIP_CPU_SYNC);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(buf->dma_sgt);
@@@ -202,7 -204,8 +202,7 @@@ static void vb2_dma_sg_prepare(void *bu
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
  
 -      dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
 -                             buf->dma_dir);
 +      dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
  }
  
  static void vb2_dma_sg_finish(void *buf_priv)
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
  
 -      dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
 +      dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
  }
  
  static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
 -      sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 -                                    buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 -      if (!sgt->nents)
 +      if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
 +                          DMA_ATTR_SKIP_CPU_SYNC))
                goto userptr_fail_map;
  
        return buf;
@@@ -280,7 -284,8 +280,7 @@@ static void vb2_dma_sg_put_userptr(voi
  
        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->num_pages);
 -      dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
 -                         DMA_ATTR_SKIP_CPU_SYNC);
 +      dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(buf->dma_sgt);
  static void *vb2_dma_sg_vaddr(void *buf_priv)
  {
        struct vb2_dma_sg_buf *buf = buf_priv;
+       struct dma_buf_map map;
+       int ret;
  
        BUG_ON(!buf);
  
        if (!buf->vaddr) {
-               if (buf->db_attach)
-                       buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
-               else
+               if (buf->db_attach) {
+                       ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+                       buf->vaddr = ret ? NULL : map.vaddr;
+               } else {
                        buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+               }
        }
  
        /* add offset in case userptr is not page-aligned */
@@@ -403,7 -412,8 +407,7 @@@ static void vb2_dma_sg_dmabuf_ops_detac
  
        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
 -              dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                      attach->dma_dir);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
@@@ -428,12 -438,15 +432,12 @@@ static struct sg_table *vb2_dma_sg_dmab
  
        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
 -              dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                      attach->dma_dir);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }
  
        /* mapping to the client with new direction */
 -      sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                              dma_dir);
 -      if (!sgt->nents) {
 +      if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
@@@ -480,11 -493,13 +484,13 @@@ vb2_dma_sg_dmabuf_ops_end_cpu_access(st
        return 0;
  }
  
- static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+ static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
  {
        struct vb2_dma_sg_buf *buf = dbuf->priv;
  
-       return vb2_dma_sg_vaddr(buf);
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+       return 0;
  }
  
  static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@@ -565,6 -580,7 +571,7 @@@ static void vb2_dma_sg_unmap_dmabuf(voi
  {
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
  
        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
        }
  
        if (buf->vaddr) {
-               dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+               dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
@@@ -229,7 -229,7 +229,7 @@@ static int vb2_vmalloc_dmabuf_ops_attac
                kfree(attach);
                return ret;
        }
 -      for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 +      for_each_sgtable_sg(sgt, sg, i) {
                struct page *page = vmalloc_to_page(vaddr);
  
                if (!page) {
@@@ -259,7 -259,8 +259,7 @@@ static void vb2_vmalloc_dmabuf_ops_deta
  
        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
 -              dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                      attach->dma_dir);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
@@@ -284,12 -285,15 +284,12 @@@ static struct sg_table *vb2_vmalloc_dma
  
        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
 -              dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                      attach->dma_dir);
 +              dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }
  
        /* mapping to the client with new direction */
 -      sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
 -                              dma_dir);
 -      if (!sgt->nents) {
 +      if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
@@@ -314,11 -318,13 +314,13 @@@ static void vb2_vmalloc_dmabuf_ops_rele
        vb2_vmalloc_put(dbuf->priv);
  }
  
- static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+ static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
  {
        struct vb2_vmalloc_buf *buf = dbuf->priv;
  
-       return buf->vaddr;
+       dma_buf_map_set_vaddr(map, buf->vaddr);
+       return 0;
  }
  
  static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@@ -370,26 -376,33 +372,33 @@@ static struct dma_buf *vb2_vmalloc_get_
  static int vb2_vmalloc_map_dmabuf(void *mem_priv)
  {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map;
+       int ret;
  
-       buf->vaddr = dma_buf_vmap(buf->dbuf);
+       ret = dma_buf_vmap(buf->dbuf, &map);
+       if (ret)
+               return -EFAULT;
+       buf->vaddr = map.vaddr;
  
-       return buf->vaddr ? 0 : -EFAULT;
+       return 0;
  }
  
  static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
  {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
  
-       dma_buf_vunmap(buf->dbuf, buf->vaddr);
+       dma_buf_vunmap(buf->dbuf, &map);
        buf->vaddr = NULL;
  }
  
  static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
  {
        struct vb2_vmalloc_buf *buf = mem_priv;
+       struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
  
        if (buf->vaddr)
-               dma_buf_vunmap(buf->dbuf, buf->vaddr);
+               dma_buf_vunmap(buf->dbuf, &map);
  
        kfree(buf);
  }
diff --combined drivers/misc/fastrpc.c
  #define FASTRPC_RMID_INIT_CREATE_ATTR 7
  #define FASTRPC_RMID_INIT_CREATE_STATIC       8
  
 +/* Protection Domain(PD) ids */
 +#define AUDIO_PD      (0) /* also GUEST_OS PD? */
 +#define USER_PD               (1)
 +#define SENSORS_PD    (2)
 +
  #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
  
  static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
@@@ -523,7 -518,7 +523,7 @@@ fastrpc_map_dma_buf(struct dma_buf_atta
  
        table = &a->sgt;
  
 -      if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
 +      if (!dma_map_sgtable(attachment->dev, table, dir, 0))
                return ERR_PTR(-ENOMEM);
  
        return table;
@@@ -533,7 -528,7 +533,7 @@@ static void fastrpc_unmap_dma_buf(struc
                                  struct sg_table *table,
                                  enum dma_data_direction dir)
  {
 -      dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
 +      dma_unmap_sgtable(attach->dev, table, dir, 0);
  }
  
  static void fastrpc_release(struct dma_buf *dmabuf)
@@@ -586,11 -581,13 +586,13 @@@ static void fastrpc_dma_buf_detatch(str
        kfree(a);
  }
  
- static void *fastrpc_vmap(struct dma_buf *dmabuf)
+ static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
  {
        struct fastrpc_buf *buf = dmabuf->priv;
  
-       return buf->virt;
+       dma_buf_map_set_vaddr(map, buf->virt);
+       return 0;
  }
  
  static int fastrpc_mmap(struct dma_buf *dmabuf,
@@@ -1042,7 -1039,7 +1044,7 @@@ static int fastrpc_init_create_process(
        inbuf.pageslen = 1;
        inbuf.attrs = init.attrs;
        inbuf.siglen = init.siglen;
 -      fl->pd = 1;
 +      fl->pd = USER_PD;
  
        if (init.filelen && init.filefd) {
                err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
@@@ -1281,7 -1278,7 +1283,7 @@@ static int fastrpc_dmabuf_alloc(struct 
        return 0;
  }
  
 -static int fastrpc_init_attach(struct fastrpc_user *fl)
 +static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
  {
        struct fastrpc_invoke_args args[1];
        int tgid = fl->tgid;
        args[0].fd = -1;
        args[0].reserved = 0;
        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
 -      fl->pd = 0;
 +      fl->pd = pd;
  
        return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
                                       sc, &args[0]);
@@@ -1482,10 -1479,7 +1484,10 @@@ static long fastrpc_device_ioctl(struc
                err = fastrpc_invoke(fl, argp);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH:
 -              err = fastrpc_init_attach(fl);
 +              err = fastrpc_init_attach(fl, AUDIO_PD);
 +              break;
 +      case FASTRPC_IOCTL_INIT_ATTACH_SNS:
 +              err = fastrpc_init_attach(fl, SENSORS_PD);
                break;
        case FASTRPC_IOCTL_INIT_CREATE:
                err = fastrpc_init_create_process(fl, argp);
@@@ -28,8 -28,6 +28,8 @@@
  #include <linux/types.h>
  #include <drm/drm_connector.h>
  
 +struct drm_device;
 +
  /*
   * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
   * DP and DPCD versions are independent.  Differences from 1.0 are not noted,
  #define DP_AUX_I2C_REPLY_DEFER                (0x2 << 2)
  #define DP_AUX_I2C_REPLY_MASK         (0x3 << 2)
  
- /* AUX CH addresses */
- /* DPCD */
+ /* DPCD Field Address Mapping */
+ /* Receiver Capability */
  #define DP_DPCD_REV                         0x000
  # define DP_DPCD_REV_10                     0x10
  # define DP_DPCD_REV_11                     0x11
  
  #define DP_MAX_DOWNSPREAD                   0x003
  # define DP_MAX_DOWNSPREAD_0_5                    (1 << 0)
+ # define DP_STREAM_REGENERATION_STATUS_CAP  (1 << 1) /* 2.0 */
  # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
  # define DP_TPS4_SUPPORTED                  (1 << 7)
  
  
  #define DP_MAIN_LINK_CHANNEL_CODING         0x006
  # define DP_CAP_ANSI_8B10B                (1 << 0)
+ # define DP_CAP_ANSI_128B132B               (1 << 1) /* 2.0 */
  
  #define DP_DOWN_STREAM_PORT_COUNT         0x007
  # define DP_PORT_COUNT_MASK               0x0f
  #define DP_FAUX_CAP                       0x020   /* 1.2 */
  # define DP_FAUX_CAP_1                            (1 << 0)
  
+ #define DP_SINK_VIDEO_FALLBACK_FORMATS      0x020   /* 2.0 */
+ # define DP_FALLBACK_1024x768_60HZ_24BPP    (1 << 0)
+ # define DP_FALLBACK_1280x720_60HZ_24BPP    (1 << 1)
+ # define DP_FALLBACK_1920x1080_60HZ_24BPP   (1 << 2)
  #define DP_MSTM_CAP                       0x021   /* 1.2 */
  # define DP_MST_CAP                       (1 << 0)
+ # define DP_SINGLE_STREAM_SIDEBAND_MSG      (1 << 1) /* 2.0 */
  
  #define DP_NUMBER_OF_AUDIO_ENDPOINTS      0x022   /* 1.2 */
  
  # define DP_DS_PORT_TYPE_DP_DUALMODE        5
  # define DP_DS_PORT_TYPE_WIRELESS           6
  # define DP_DS_PORT_HPD                           (1 << 3)
 +# define DP_DS_NON_EDID_MASK              (0xf << 4)
 +# define DP_DS_NON_EDID_720x480i_60       (1 << 4)
 +# define DP_DS_NON_EDID_720x480i_50       (2 << 4)
 +# define DP_DS_NON_EDID_1920x1080i_60     (3 << 4)
 +# define DP_DS_NON_EDID_1920x1080i_50     (4 << 4)
 +# define DP_DS_NON_EDID_1280x720_60       (5 << 4)
 +# define DP_DS_NON_EDID_1280x720_50       (7 << 4)
  /* offset 1 for VGA is maximum megapixels per second / 8 */
 -/* offset 2 */
 +/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
 +/* offset 2 for VGA/DVI/HDMI */
  # define DP_DS_MAX_BPC_MASK               (3 << 0)
  # define DP_DS_8BPC                       0
  # define DP_DS_10BPC                      1
  # define DP_DS_12BPC                      2
  # define DP_DS_16BPC                      3
 +/* offset 3 for DVI */
 +# define DP_DS_DVI_DUAL_LINK              (1 << 1)
 +# define DP_DS_DVI_HIGH_COLOR_DEPTH       (1 << 2)
 +/* offset 3 for HDMI */
 +# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
 +# define DP_DS_HDMI_YCBCR422_PASS_THROUGH   (1 << 1)
 +# define DP_DS_HDMI_YCBCR420_PASS_THROUGH   (1 << 2)
 +# define DP_DS_HDMI_YCBCR444_TO_422_CONV    (1 << 3)
 +# define DP_DS_HDMI_YCBCR444_TO_420_CONV    (1 << 4)
  
  #define DP_MAX_DOWNSTREAM_PORTS                   0x10
  
  #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1  0x0a1
  #define DP_DSC_BRANCH_MAX_LINE_WIDTH        0x0a2
  
- /* link configuration */
+ /* Link Configuration */
  #define       DP_LINK_BW_SET                      0x100
  # define DP_LINK_RATE_TABLE               0x00    /* eDP 1.4 */
  # define DP_LINK_BW_1_62                  0x06
  # define DP_LINK_BW_2_7                           0x0a
  # define DP_LINK_BW_5_4                           0x14    /* 1.2 */
  # define DP_LINK_BW_8_1                           0x1e    /* 1.4 */
+ # define DP_LINK_BW_10                      0x01    /* 2.0 128b/132b Link Layer */
+ # define DP_LINK_BW_13_5                    0x04    /* 2.0 128b/132b Link Layer */
+ # define DP_LINK_BW_20                      0x02    /* 2.0 128b/132b Link Layer */
  
  #define DP_LANE_COUNT_SET                 0x101
  # define DP_LANE_COUNT_MASK               0x0f
  # define DP_TRAIN_PRE_EMPHASIS_SHIFT      3
  # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
  
+ # define DP_TX_FFE_PRESET_VALUE_MASK        (0xf << 0) /* 2.0 128b/132b Link Layer */
  #define DP_DOWNSPREAD_CTRL                0x107
  # define DP_SPREAD_AMP_0_5                (1 << 4)
  # define DP_MSA_TIMING_PAR_IGNORE_EN      (1 << 7) /* eDP */
  
  #define DP_MAIN_LINK_CHANNEL_CODING_SET           0x108
  # define DP_SET_ANSI_8B10B                (1 << 0)
+ # define DP_SET_ANSI_128B132B               (1 << 1)
  
  #define DP_I2C_SPEED_CONTROL_STATUS       0x109   /* DPI */
  /* bitmask as for DP_I2C_SPEED_CAP */
  # define DP_LINK_QUAL_PATTERN_ERROR_RATE    2
  # define DP_LINK_QUAL_PATTERN_PRBS7       3
  # define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM  4
- # define DP_LINK_QUAL_PATTERN_HBR2_EYE      5
- # define DP_LINK_QUAL_PATTERN_MASK        7
+ # define DP_LINK_QUAL_PATTERN_CP2520_PAT_1  5
+ # define DP_LINK_QUAL_PATTERN_CP2520_PAT_2  6
+ # define DP_LINK_QUAL_PATTERN_CP2520_PAT_3  7
+ /* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+ # define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+ # define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+ # define DP_LINK_QUAL_PATTERN_PRSBS9        0x18
+ # define DP_LINK_QUAL_PATTERN_PRSBS11       0x20
+ # define DP_LINK_QUAL_PATTERN_PRSBS15       0x28
+ # define DP_LINK_QUAL_PATTERN_PRSBS23       0x30
+ # define DP_LINK_QUAL_PATTERN_PRSBS31       0x38
+ # define DP_LINK_QUAL_PATTERN_CUSTOM        0x40
+ # define DP_LINK_QUAL_PATTERN_SQUARE        0x48
  
  #define DP_TRAINING_LANE0_1_SET2          0x10f
  #define DP_TRAINING_LANE2_3_SET2          0x110
  #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
  #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
  
+ /* Link/Sink Device Status */
  #define DP_SINK_COUNT                     0x200
  /* prior to 1.2 bit 7 was reserved mbz */
  # define DP_GET_SINK_COUNT(x)             ((((x) & 0x80) >> 1) | ((x) & 0x3f))
  #define DP_LINK_STATUS_UPDATED                    (1 << 7)
  
  #define DP_SINK_STATUS                            0x205
- #define DP_RECEIVE_PORT_0_STATUS          (1 << 0)
- #define DP_RECEIVE_PORT_1_STATUS          (1 << 1)
+ # define DP_RECEIVE_PORT_0_STATUS         (1 << 0)
+ # define DP_RECEIVE_PORT_1_STATUS         (1 << 1)
+ # define DP_STREAM_REGENERATION_STATUS      (1 << 2) /* 2.0 */
  
  #define DP_ADJUST_REQUEST_LANE0_1         0x206
  #define DP_ADJUST_REQUEST_LANE2_3         0x207
  # define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
  # define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
  
+ /* DP 2.0 128b/132b Link Layer */
+ # define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK  (0xf << 0)
+ # define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+ # define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK  (0xf << 4)
+ # define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
  #define DP_ADJUST_REQUEST_POST_CURSOR2      0x20c
  # define DP_ADJUST_POST_CURSOR2_LANE0_MASK  0x03
  # define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
  #define DP_VC_PAYLOAD_ID_SLOT_1             0x2c1   /* 1.2 MST */
  /* up to ID_SLOT_63 at 0x2ff */
  
+ /* Source Device-specific */
  #define DP_SOURCE_OUI                     0x300
+ /* Sink Device-specific */
  #define DP_SINK_OUI                       0x400
+ /* Branch Device-specific */
  #define DP_BRANCH_OUI                     0x500
  #define DP_BRANCH_ID                        0x503
  #define DP_BRANCH_REVISION_START            0x509
  #define DP_BRANCH_HW_REV                    0x509
  #define DP_BRANCH_SW_REV                    0x50A
  
+ /* Link/Sink Device Power Control */
  #define DP_SET_POWER                        0x600
  # define DP_SET_POWER_D0                    0x1
  # define DP_SET_POWER_D3                    0x2
  # define DP_SET_POWER_MASK                  0x3
  # define DP_SET_POWER_D3_AUX_ON             0x5
  
+ /* eDP-specific */
  #define DP_EDP_DPCD_REV                           0x700    /* eDP 1.2 */
  # define DP_EDP_11                        0x00
  # define DP_EDP_12                        0x01
  #define DP_EDP_REGIONAL_BACKLIGHT_BASE      0x740    /* eDP 1.4 */
  #define DP_EDP_REGIONAL_BACKLIGHT_0       0x741    /* eDP 1.4 */
  
+ /* Sideband MSG Buffers */
  #define DP_SIDEBAND_MSG_DOWN_REQ_BASE     0x1000   /* 1.2 MST */
  #define DP_SIDEBAND_MSG_UP_REP_BASE       0x1200   /* 1.2 MST */
  #define DP_SIDEBAND_MSG_DOWN_REP_BASE     0x1400   /* 1.2 MST */
  #define DP_SIDEBAND_MSG_UP_REQ_BASE       0x1600   /* 1.2 MST */
  
+ /* DPRX Event Status Indicator */
  #define DP_SINK_COUNT_ESI                 0x2002   /* 1.2 */
  /* 0-5 sink count */
  # define DP_SINK_COUNT_CP_READY             (1 << 6)
  #define DP_LANE_ALIGN_STATUS_UPDATED_ESI       0x200e /* status same as 0x204 */
  #define DP_SINK_STATUS_ESI                     0x200f /* status same as 0x205 */
  
+ /* Extended Receiver Capability: See DP_DPCD_REV for definitions */
  #define DP_DP13_DPCD_REV                    0x2200
- #define DP_DP13_MAX_LINK_RATE               0x2201
  
  #define DP_DPRX_FEATURE_ENUMERATION_LIST    0x2210  /* DP 1.3 */
  # define DP_GTC_CAP                                   (1 << 0)  /* DP 1.3 */
  # define DP_VSC_EXT_CEA_SDP_SUPPORTED                 (1 << 6)  /* DP 1.4 */
  # define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED                (1 << 7)  /* DP 1.4 */
  
+ #define DP_128B132B_SUPPORTED_LINK_RATES       0x2215 /* 2.0 */
+ # define DP_UHBR10                             (1 << 0)
+ # define DP_UHBR20                             (1 << 1)
+ # define DP_UHBR13_5                           (1 << 2)
+ #define DP_128B132B_TRAINING_AUX_RD_INTERVAL   0x2216 /* 2.0 */
+ # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+ /* Protocol Converter Extension */
  /* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
  #define DP_CEC_TUNNELING_CAPABILITY            0x3000
  # define DP_CEC_TUNNELING_CAPABLE               (1 << 0)
  #define DP_CEC_TX_MESSAGE_BUFFER               0x3020
  #define DP_CEC_MESSAGE_BUFFER_LENGTH             0x10
  
 +#define DP_PROTOCOL_CONVERTER_CONTROL_0               0x3050 /* DP 1.3 */
 +# define DP_HDMI_DVI_OUTPUT_CONFIG            (1 << 0) /* DP 1.3 */
 +#define DP_PROTOCOL_CONVERTER_CONTROL_1               0x3051 /* DP 1.3 */
 +# define DP_CONVERSION_TO_YCBCR420_ENABLE     (1 << 0) /* DP 1.3 */
 +# define DP_HDMI_EDID_PROCESSING_DISABLE      (1 << 1) /* DP 1.4 */
 +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE        (1 << 2) /* DP 1.4 */
 +# define DP_HDMI_FORCE_SCRAMBLING             (1 << 3) /* DP 1.4 */
 +#define DP_PROTOCOL_CONVERTER_CONTROL_2               0x3052 /* DP 1.3 */
 +# define DP_CONVERSION_TO_YCBCR422_ENABLE     (1 << 0) /* DP 1.3 */
 +
+ /* HDCP 1.3 and HDCP 2.2 */
  #define DP_AUX_HDCP_BKSV              0x68000
  #define DP_AUX_HDCP_RI_PRIME          0x68005
  #define DP_AUX_HDCP_AKSV              0x68007
  #define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET    0x69494
  #define DP_HDCP_2_2_REG_DBG_OFFSET            0x69518
  
- /* Link Training (LT)-tunable PHY Repeaters */
+ /* LTTPR: Link Training (LT)-tunable PHY Repeaters */
  #define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
  #define DP_MAX_LINK_RATE_PHY_REPEATER                     0xf0001 /* 1.4a */
  #define DP_PHY_REPEATER_CNT                               0xf0002 /* 1.3 */
  #define DP_POWER_DOWN_PHY             0x25
  #define DP_SINK_EVENT_NOTIFY          0x30
  #define DP_QUERY_STREAM_ENC_STATUS    0x38
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST    0
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE    1
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE      2
  
  /* DP 1.2 MST sideband reply types */
  #define DP_SIDEBAND_REPLY_ACK         0x00
  #define DP_MST_PHYSICAL_PORT_0 0
  #define DP_MST_LOGICAL_PORT_0 8
  
 +#define DP_LINK_CONSTANT_N_VALUE 0x8000
  #define DP_LINK_STATUS_SIZE      6
  bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
@@@ -1652,35 -1671,13 +1704,35 @@@ bool drm_dp_send_real_edid_checksum(str
  int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
                                const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
 -int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                              const u8 port_cap[4]);
 +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                             const u8 port_cap[4], u8 type);
 +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                             const u8 port_cap[4],
 +                             const struct edid *edid);
 +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                 const u8 port_cap[4]);
 +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   const u8 port_cap[4],
 +                                   const struct edid *edid);
 +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   const u8 port_cap[4],
 +                                   const struct edid *edid);
  int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                            const u8 port_cap[4]);
 +                            const u8 port_cap[4],
 +                            const struct edid *edid);
 +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                     const u8 port_cap[4]);
 +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                           const u8 port_cap[4]);
 +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
 +                                              const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                              const u8 port_cap[4]);
  int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
 -void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                           const u8 port_cap[4], struct drm_dp_aux *aux);
 +void drm_dp_downstream_debug(struct seq_file *m,
 +                           const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                           const u8 port_cap[4],
 +                           const struct edid *edid,
 +                           struct drm_dp_aux *aux);
  enum drm_mode_subconnector
  drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                         const u8 port_cap[4]);
@@@ -1836,7 -1833,7 +1888,7 @@@ static inline void drm_dp_cec_unset_edi
   * @link_rate: Requested Link rate from DPCD 0x219
   * @num_lanes: Number of lanes requested by sing through DPCD 0x220
   * @phy_pattern: DP Phy test pattern from DPCD 0x248
 - * @hb2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B
 + * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B
   * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
   * @enhanced_frame_cap: flag for enhanced frame capability.
   */
diff --combined include/drm/drm_prime.h
@@@ -54,6 -54,7 +54,7 @@@ struct device
  struct dma_buf_export_info;
  struct dma_buf;
  struct dma_buf_attachment;
+ struct dma_buf_map;
  
  enum dma_data_direction;
  
@@@ -82,8 -83,8 +83,8 @@@ struct sg_table *drm_gem_map_dma_buf(st
  void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           struct sg_table *sgt,
                           enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
- void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
+ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
  
  int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
  int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
@@@ -93,8 -94,6 +94,8 @@@ struct sg_table *drm_prime_pages_to_sg(
  struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags);
  
 +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
 +
  /* helper functions for importing */
  struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                struct dma_buf *dma_buf,